/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_

#include "base/allocator.h"
#include "base/safe_map.h"
#include "base/tracking_safe_map.h"
#include "dlmalloc_space.h"
#include "space.h"
#include "thread-current-inl.h"

#include <set>
#include <vector>

namespace art {
namespace gc {
namespace space {

class AllocationInfo;

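// Selects which large object space implementation the heap uses: kMap backs each large object
// with its own memory map, kFreeList carves objects out of a single contiguous map with a free
// list, and kDisabled routes large allocations to the main space instead.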
enum class LargeObjectSpaceType {
  kDisabled,
  kMap,
  kFreeList,
};

// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const override {
    return kSpaceTypeLargeObjectSpace;
  }
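  // Swap the live and mark bitmaps of this space; used by the GC around sweeping.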
  void SwapBitmaps();
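  // Copy the live bitmap into the mark bitmap, so everything currently live is considered marked.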
  void CopyLiveToMarked();
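  // Visit each large object in the space, passing its address range and used byte count to the
  // callback.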
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetBytesAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_bytes_allocated_;
  }
  uint64_t GetObjectsAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_objects_allocated_;
  }
  uint64_t GetTotalBytesAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_bytes_allocated_;
  }
  uint64_t GetTotalObjectsAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_objects_allocated_;
  }
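  // Free num_ptrs objects from the ptrs array; returns the total number of bytes freed.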
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) override {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() override {
    return 0U;
  }
  bool IsAllocSpace() const override {
    return true;
  }
  AllocSpace* AsAllocSpace() override {
    return this;
  }
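  // Sweep objects that are allocated but not marked, returning the freed object and byte counts.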
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  bool CanMoveObjects() const override {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const override {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space; marks all existing large objects as zygote large
  // objects.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;

  virtual void ForEachMemMap(std::function<void(const MemMap&)> func) const = 0;
  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
  // End() from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
                            const char* lock_name);
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Used to ensure mutual exclusion when the allocation space's data structures,
  // including the allocation counters below, are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Number of bytes which have been allocated into the space and not yet freed. The count is also
  // included in the identically named field in Heap. Counts actual allocated (after rounding),
  // not requested, sizes. TODO: It would be cheaper to just maintain total allocated and total
  // free counts.
  uint64_t num_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t num_objects_allocated_ GUARDED_BY(lock_);

  // Totals for large objects ever allocated, including those that have since been deallocated.
  // Never decremented.
  uint64_t total_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t total_objects_allocated_ GUARDED_BY(lock_);

  // Begin and end, may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};

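// A minimal allocation/free sketch against the interface above (hypothetical `los` and `self`
// names; either concrete subclass below behaves the same way through AllocSpace):
//
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   mirror::Object* obj = los->Alloc(self, num_bytes, &bytes_allocated,
//                                    &usable_size, &bytes_tl_bulk_allocated);
//   if (obj != nullptr) {
//     CHECK(los->Contains(obj));
//     los->Free(self, obj);
//   }
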
// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(!lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
      REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* ptr) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);

 protected:
  struct LargeObject {
    MemMap mem_map;
    bool is_zygote;
  };
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);

  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
      GUARDED_BY(lock_);
};

// A continuous large object space with a free-list to handle holes.
class FreeListSpace final : public LargeObjectSpace {
 public:
  static constexpr size_t kAlignment = kPageSize;

  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, size_t capacity);
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
  void Dump(std::ostream& os) const override REQUIRES(!lock_);
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);

 protected:
  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
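  // The space is divided into kAlignment (page) sized slots: a slot index is the page offset of
  // an address from Begin(), and GetAllocationAddressForSlot() below is the inverse mapping.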
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
  }
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);

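  // Orders entries in free_blocks_ by the number of free bytes that precede them (ties broken by
  // address), so that Alloc can find a suitable free block with a single set lookup.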
  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  typedef std::set<AllocationInfo*, SortByPrevFree,
                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;

  // There is no footer for any allocations at the end of the space, so we keep track of how much
  // free space there is at the end manually.
  MemMap mem_map_;
  // Side table for allocation info, one per page.
  MemMap allocation_info_map_;
  AllocationInfo* allocation_info_;

  // Free bytes at the end of the space.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_