/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>

#include <memory>

#include <android-base/logging.h>

#include "base/macros.h"
#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "oat/image.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-current-inl.h"

namespace art HIDDEN {
namespace gc {
namespace space {

class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() override {
    // Historical note: We used to delete the remaining large objects here to keep Valgrind
    // happy, since some large objects, such as Dex cache arrays, are never freed because the
    // class linker keeps them live.
  }

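  // Layout when running under a memory tool: [redzone][object payload][redzone]. Only the
  // payload pointer is handed out; both redzones are marked inaccessible so out-of-bounds
  // accesses are reported.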
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + MemoryToolRedZoneBytes() * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + MemoryToolRedZoneBytes());
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), MemoryToolRedZoneBytes());
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        MemoryToolRedZoneBytes());
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) override {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const override {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
  static size_t MemoryToolRedZoneBytes() {
    return gPageSize;
  }

  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - MemoryToolRedZoneBytes());
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - MemoryToolRedZoneBytes());
  }
};

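// Swaps the roles of the live and mark bitmaps; the GC uses this so the set of objects marked in
// the last collection becomes the new live set.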
void LargeObjectSpace::SwapBitmaps() {
  std::swap(live_bitmap_, mark_bitmap_);
  // Preserve names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_.GetName();
  live_bitmap_.SetName(mark_bitmap_.GetName());
  mark_bitmap_.SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
                                   const char* lock_name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      lock_(lock_name, kAllocSpaceLock),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_.CopyFrom(&live_bitmap_);
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr, "large object map space lock") {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

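// Each large object is backed by its own anonymous mapping, recorded in large_objects_ keyed by
// the object address.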
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  DCHECK_GE(gPageSize, ObjectAlignment())
      << "MapAnonymousAligned() should be used if the large-object alignment is larger than the "
         "runtime page size";
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
                                        num_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/true,
                                        &error_msg);
  if (UNLIKELY(!mem_map.IsValid())) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map.Begin());
  const size_t allocation_size = mem_map.BaseSize();
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {std::move(mem_map), false /* not zygote */});
  DCHECK(bytes_allocated != nullptr);

  if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
    begin_ = reinterpret_cast<uint8_t*>(obj);
  }
  end_ = std::max(end_, reinterpret_cast<uint8_t*>(obj) + allocation_size);

  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

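// Flags every currently allocated large object as a zygote object and, if requested, sets its
// mark bit.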
void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
    if (set_mark_bit) {
      bool success = pair.first->AtomicSetMarkBit(0, 1);
      CHECK(success);
    }
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  const size_t map_size = it->second.mem_map.BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map.BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = &pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

void LargeObjectMapSpace::ForEachMemMap(std::function<void(const MemMap&)> func) const {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    func(pair.second.mem_map);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes + whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an object allocation. Each
// allocation has an AllocationInfo which records the size of the free block preceding it.
// Implemented in such a way that we can also find the iterator for any allocation info pointer.
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the number of blocks, each the size of the large-object alignment, that the
  // allocation covers.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * LargeObjectSpace::ObjectAlignment();
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED_PARAM(size, LargeObjectSpace::ObjectAlignment());
    alloc_size_ = (size / LargeObjectSpace::ObjectAlignment()) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Returns true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Changes the object to be a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Finds and returns the allocation info immediately following this one.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing so we only need to be able to do it if the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns how many units, each the size of the large-object alignment, of free space precede
  // this block.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before the block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * LargeObjectSpace::ObjectAlignment();
  }
  // Update the size of the free block prior to the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED_PARAM(bytes, LargeObjectSpace::ObjectAlignment());
    prev_free_ = bytes / LargeObjectSpace::ObjectAlignment();
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
  // Mask selecting the size bits, i.e. everything except the flag bits above.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);
  // Contains the size of the previous free block with the large-object alignment value as the
  // unit. If 0 then the allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in the large-object alignment value as the unit.
  uint32_t alloc_size_;
};

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LE(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_.End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

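// Ordering for free_blocks_: by the size of the free gap recorded in prev_free_, then by the
// block's own size, then by address. Alloc() relies on this so that lower_bound() returns the
// smallest free gap that still fits the request (best fit).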
inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
  CHECK_ALIGNED_PARAM(size, ObjectAlignment());
  DCHECK_GE(gPageSize, ObjectAlignment())
      << "MapAnonymousAligned() should be used if the large-object alignment is larger than the "
         "runtime page size";
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                        size,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/true,
                                        &error_msg);
  CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
}

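// The space is backed by two mappings: mem_map_ holds the objects themselves, and
// allocation_info_map_ holds one AllocationInfo per ObjectAlignment()-sized unit of the space.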
FreeListSpace::FreeListSpace(const std::string& name,
                             MemMap&& mem_map,
                             uint8_t* begin,
                             uint8_t* end)
    : LargeObjectSpace(name, begin, end, "free list space lock"),
      mem_map_(std::move(mem_map)) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED_PARAM(space_capacity, ObjectAlignment());
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / ObjectAlignment());
  std::string error_msg;
  allocation_info_map_ =
      MemMap::MapAnonymous("large object free list space allocation info map",
                           alloc_info_size,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb=*/ false,
                           &error_msg);
  CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
}

void FreeListSpace::ClampGrowthLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  new_capacity = RoundUp(new_capacity, ObjectAlignment());
  CHECK_LE(new_capacity, Size());
  size_t diff = Size() - new_capacity;
  // If we don't have enough free bytes at the end to clamp, then do the best that we can.
  if (diff > free_end_) {
    new_capacity = Size() - free_end_;
    diff = free_end_;
  }

  size_t alloc_info_size = sizeof(AllocationInfo) * (new_capacity / ObjectAlignment());
  allocation_info_map_.SetSize(alloc_info_size);
  mem_map_.SetSize(new_capacity);
  // We don't need to change anything in 'free_blocks_' as the free block at
  // the end of the space isn't in there.
  free_end_ -= diff;
  end_ -= diff;
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::ForEachMemMap(std::function<void(const MemMap&)> func) const {
  MutexLock mu(Thread::Current(), lock_);
  func(allocation_info_map_);
  func(mem_map_);
}

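// Removes from free_blocks_ the free gap that precedes |info| (recorded in its prev_free_).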
void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

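// Returns the object's bytes to the free list, coalescing with the free chunk on either side if
// present, or growing the free region at the end of the space when the object borders it.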
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED_PARAM(obj, ObjectAlignment());
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED_PARAM(allocation_size, ObjectAlignment());

  // madvise the pages away without holding the lock.
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
  }

  MutexLock mu(self, lock_);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  // NOTE: next_info could be pointing right after the allocation_info_map_
  // when freeing an object at the very end of the space. But that's safe
  // as we don't dereference it in that case. We only use it to calculate
  // next_addr from its offset within the map.
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED_PARAM(next_next_info->ByteSize(), ObjectAlignment());
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

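// Best-fit allocation: find the smallest recorded free gap that can hold the rounded-up request;
// if there is none, carve the allocation out of the untouched free region at the end of the
// space.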
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, ObjectAlignment());
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the previous allocation info free space.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block, there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject([[maybe_unused]] Thread* self, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
           *end_info = GetAllocationInfoForAddress(free_end_start);
       cur_info < end_info;
       cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
      if (set_mark_bit) {
        ObjPtr<mirror::Object> obj =
            reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(cur_info));
        bool success = obj->AtomicSetMarkBit(0, 1);
        CHECK(success);
      }
    }
  }
}

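// Invoked by SweepWalk for each batch of objects that are in the live bitmap but not in the mark
// bitmap: clear their live bits (unless the bitmaps were swapped) and free them.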
void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(range.first),
                                           reinterpret_cast<uintptr_t>(range.second),
                                           SweepCallback,
                                           &scc);
  return scc.freed;
}

bool LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

}  // namespace space
}  // namespace gc
}  // namespace art