/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>

#include <memory>

#include "base/logging.h"
#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "image.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
    // Keep Valgrind happy if there are any large objects, such as dex cache arrays, which aren't
    // freed since they are held live by the class linker.
    MutexLock mu(Thread::Current(), lock_);
    for (auto& m : large_objects_) {
      delete m.second.mem_map;
    }
  }

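  // Pad each allocation with a leading and a trailing redzone of kMemoryToolRedZoneBytes and
  // hand out a pointer past the leading one, so the memory tool can flag out-of-bounds accesses:
  //   [redzone][object payload][redzone]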
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        kMemoryToolRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
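  // Map a user-visible object pointer back to the start of the real allocation,
  // one redzone before it.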
  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);

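  // Grow the space's [begin_, end_) bounds to cover the new mapping, so that
  // Begin() and End() bracket every large object.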
  if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
    begin_ = reinterpret_cast<uint8_t*>(obj);
  }
  end_ = std::max(end_, reinterpret_cast<uint8_t*>(obj) + allocation_size);

  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  MemMap* mem_map = it->second.mem_map;
  const size_t map_size = mem_map->BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete mem_map;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map->BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

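// Each large object lives in its own mem map, so report each map as one chunk. The trailing
// null invocation marks the end of a contiguous region (cf. DlMallocSpace::Walk).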
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes and whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an allocation.
// Each allocation has an AllocationInfo which records the size of the free block
// preceding it. Implemented in such a way that we can also find the iterator for
// any allocation info pointer.
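// The infos live in one flat array parallel to the space, one entry per kAlignment
// unit. For example, a 3-unit allocation described by allocation_info_[i] is
// followed by allocation_info_[i + 3], which is why GetNextInfo() is plain pointer
// arithmetic on this array.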
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the number of kAlignment units (pages) that the allocation covers.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Returns true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Changes the object to be a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Finds and returns the allocation info immediately after this allocation.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing, so we only need to be able to do it if the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns how many kAlignment units of free space precede this block.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before this block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Updates the size of the free block preceding this allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
  // Mask that clears the flag bits, leaving the size bits.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in kAlignment as the unit.
  uint32_t alloc_size_;
};

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

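// free_blocks_ stores each free block via the AllocationInfo that follows it, ordered
// primarily by the size of the preceding free gap (GetPrevFree). That makes lower_bound()
// on the set a best-fit search; AlignSize and address act as tie-breakers so distinct
// infos never compare equal.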
inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin,
                                     size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

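// The constructor sizes allocation_info_ at one AllocationInfo per kAlignment unit of the
// space, kept in a separate anonymous mapping parallel to the object space.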
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(
      MemMap::MapAnonymous("large object free list space allocation info map",
                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                           false, false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
      << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

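// Walks allocated chunks in address order. free_end_ is the size of the free region at the
// end of the space, so the walk stops at end_ - free_end_.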
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

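// Frees a chunk and eagerly coalesces: the freed bytes are merged with any free gap before
// the chunk and any free block after it, so no two free blocks are ever adjacent. If the
// merged block reaches the trailing free region, it is folded into free_end_ instead of the
// free set.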
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

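// Best-fit allocation: a stack-allocated dummy AllocationInfo whose prev_free_ equals the
// requested size is used as the lower_bound() key into free_blocks_, yielding the smallest
// free block of at least that size. If no block fits, the request is carved from the free
// region at the end of the space.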
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the free space preceding this allocation info.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block, so there cannot be another free
  // block before it.
  if (kIsDebugBuild) {
    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
      cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
    }
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

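// SweepWalk visits objects whose bits are set in the live bitmap but clear in the mark
// bitmap; those are the dead large objects, which SweepCallback then frees.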
collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(range.first),
                                           reinterpret_cast<uintptr_t>(range.second),
                                           SweepCallback,
                                           &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

}  // namespace space
}  // namespace gc
}  // namespace art