/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <valgrind.h>
#include <memory>
#include <memcheck/memcheck.h>

#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "space-inl.h"
#include "thread-inl.h"

namespace art {
namespace gc {
namespace space {

class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
    // Keep valgrind happy if there are any large objects, such as dex cache arrays, which aren't
    // freed since they are held live by the class linker.
    MutexLock mu(Thread::Current(), lock_);
    for (auto& m : large_objects_) {
      delete m.second.mem_map;
    }
  }

  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        kMemoryToolRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

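  // A sketch of the resulting layout (derived from Alloc() above, not extra documentation): the
  // underlying allocation is num_bytes + 2 * kMemoryToolRedZoneBytes long and the caller only
  // ever sees the middle:
  //
  //   |<- redzone, NOACCESS ->|<- num_bytes, returned object ->|<- redzone, NOACCESS ->|
  //   ^ obj                   ^ object_without_rdz
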
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  if (kIsDebugBuild) {
    ReaderMutexLock mu2(Thread::Current(), *Locks::heap_bitmap_lock_);
    auto* heap = Runtime::Current()->GetHeap();
    auto* live_bitmap = heap->GetLiveBitmap();
    auto* space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj);
    CHECK(space_bitmap == nullptr) << obj << " overlaps with bitmap " << *space_bitmap;
    auto* obj_end = reinterpret_cast<mirror::Object*>(mem_map->End());
    space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj_end - 1);
    CHECK(space_bitmap == nullptr) << obj_end << " overlaps with bitmap " << *space_bitmap;
  }
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);
  begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
  uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + allocation_size;
  if (end_ == nullptr || obj_end > end_) {
    end_ = obj_end;
  }
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

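// Note on the map-based space above (a reading of the code, not extra documentation): each large
// object lives in its own anonymous mapping, so the reported allocation size is the page-rounded
// map size (mem_map->BaseSize()) rather than the raw num_bytes, and begin_/end_ only track the
// lowest and highest mapped addresses; the space is discontinuous in between.
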
bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG(INTERNAL_FATAL));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  MemMap* mem_map = it->second.mem_map;
  const size_t map_size = mem_map->BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete mem_map;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map->BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes and whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an allocation. Each allocation
// has an AllocationInfo which records the size of the free block preceding it. Implemented in
// such a way that we can also find the iterator for any allocation info pointer.
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Return the number of pages that the allocation info covers.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Return true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Change the object to be a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Finds and returns the allocation info immediately after this one.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing, so we only need to be able to do it when the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Return how many kAlignment units of free space precede this allocation.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before the block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Update the size of the free block prior to the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);  // Mask to strip the flag bits.
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in kAlignment as the unit.
  uint32_t alloc_size_;
};

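// A sketch of the bookkeeping scheme above (derived from the code, not extra documentation):
// allocation_info_ is a flat array with one AllocationInfo per kAlignment bytes of the space, so
// "this + AlignSize()" lands on the info of the next block and "this - prev_free_" on the info
// that heads the preceding free run. For example, a 3-unit allocation A followed by a 2-unit
// free run F and then an allocated block N looks like:
//
//   infos:  [A: alloc_size_=3] [.] [.] [F: alloc_size_=2|kFlagFree] [.] [N: prev_free_=2, ...]
//
// where only the bracketed head entries are meaningful; the "." entries in the middle of runs
// are undefined, as the member comments above state.
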
size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

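// Note on the ordering above (a reading of the code): free_blocks_ stores the AllocationInfo
// *following* each free run, keyed primarily by prev_free_, the size of that run. Sorting by
// free-run size first is what lets Alloc() below use lower_bound with a dummy info to find the
// smallest run that still fits, i.e. a best-fit search; the remaining comparisons merely make
// the ordering total so the set is well defined.
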
FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(
      MemMap::MapAnonymous("large object free list space allocation info map",
                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                           false, false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
      << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

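// Summary of Free() above (derived from the code): a freed run is coalesced with a free run
// immediately before it, with one immediately after it, or absorbed into the free region at the
// end of the space, so a single free_blocks_ entry always describes an entire contiguous free
// range; the DCHECKs that a free run's neighbors are never themselves free enforce that
// invariant.
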
size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the previous allocation info free space.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block; there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

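// Worked example for the best-fit path above (a sketch, the numbers are invented): suppose
// free_blocks_ holds a 5-unit free run (units of kAlignment) and Alloc() needs 3 units.
// lower_bound on the dummy temp_info (prev_free_ == 3) finds the trailing info of the 5-unit
// run. The object is placed at the head of the run, the trailing info's prev_free_ drops to 2,
// the info just past the new object becomes the head of a 2-unit free block, and the trailing
// info is re-inserted into free_blocks_ to describe the shrunken run.
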
void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
      cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
    }
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(Begin()),
                                           reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
  return scc.freed;
}

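// Reading of Sweep() above (an assumption from the bitmap names; SweepWalk is defined elsewhere):
// SweepWalk visits objects that are set in live_bitmap but clear in mark_bitmap, i.e. objects
// that survived the previous GC but were not marked this cycle, and hands them to SweepCallback
// in batches so they can be freed and their accounting updated.
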
void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

}  // namespace space
}  // namespace gc
}  // namespace art