/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_

#include "base/allocator.h"
#include "base/safe_map.h"
#include "base/tracking_safe_map.h"
#include "dlmalloc_space.h"
#include "space.h"

#include <set>
#include <vector>

namespace art {
namespace gc {
namespace space {

class AllocationInfo;

// Which large object space implementation the heap uses, if any.
enum class LargeObjectSpaceType {
  kDisabled,  // No dedicated large object space.
  kMap,       // LargeObjectMapSpace: one memory map per allocation.
  kFreeList,  // FreeListSpace: a single contiguous map with a free list for holes.
};

// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeLargeObjectSpace;
  }
  void SwapBitmaps();
  void CopyLiveToMarked();
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetBytesAllocated() OVERRIDE {
    return num_bytes_allocated_;
  }
  uint64_t GetObjectsAllocated() OVERRIDE {
    return num_objects_allocated_;
  }
  uint64_t GetTotalBytesAllocated() const {
    return total_bytes_allocated_;
  }
  uint64_t GetTotalObjectsAllocated() const {
    return total_objects_allocated_;
  }
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
    return 0U;
  }
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  virtual bool CanMoveObjects() const OVERRIDE {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space; marks all existing large objects as zygote large
  // objects.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;

  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin()
  // and End() from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
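  // Note: begin_ and end_ can move while another thread allocates, so a caller
  // reading Begin() and End() separately could observe values from two different
  // allocations. The overrides below are annotated REQUIRES(!lock_), which
  // suggests they snapshot both values under lock_.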

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Approximate number of bytes and objects currently allocated in the space.
  uint64_t num_bytes_allocated_;
  uint64_t num_objects_allocated_;
  // Totals over the lifetime of the space.
  uint64_t total_bytes_allocated_;
  uint64_t total_objects_allocated_;
  // Begin and end, may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};

// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;

  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);

 protected:
  struct LargeObject {
    MemMap* mem_map;
    bool is_zygote;
  };
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);

  // Used to ensure mutual exclusion when the allocation space's data structures are being
  // modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
      GUARDED_BY(lock_);
};
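
// Illustrative usage sketch (assumes an initialized runtime, a valid Thread*
// self, and KB == 1024 from ART's globals header; error handling elided):
//
//   LargeObjectMapSpace* los = LargeObjectMapSpace::Create("large object space");
//   size_t bytes_allocated = 0;
//   size_t usable_size = 0;
//   size_t bytes_tl_bulk_allocated = 0;
//   mirror::Object* obj = los->Alloc(self, 128 * KB, &bytes_allocated,
//                                    &usable_size, &bytes_tl_bulk_allocated);
//   CHECK(los->Contains(obj));
//   los->Free(self, obj);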

// A continuous large object space with a free-list to handle holes.
class FreeListSpace FINAL : public LargeObjectSpace {
 public:
  static constexpr size_t kAlignment = kPageSize;

  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
  void Dump(std::ostream& os) const REQUIRES(!lock_);

  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);

 protected:
  FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
  }
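  // Worked example (illustrative; assumes kPageSize == 4096, the common case):
  // an address 8192 bytes past Begin() maps to slot 8192 / 4096 == 2, i.e. one
  // slot per page.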
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
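  // Note: GetAllocationAddressForSlot() inverts GetSlotIndexForAddress() for
  // slot-aligned addresses; for any other address the round trip rounds down to
  // the start of the enclosing slot.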
  // Removes the header from the free blocks set by finding the corresponding iterator and
  // erasing it.
  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);

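  // Comparator for the free-blocks set below; as the name suggests, it orders
  // AllocationInfos by the size of the free run preceding them (the definition
  // lives in the .cc file).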
  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  typedef std::set<AllocationInfo*, SortByPrevFree,
                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;

  // There is no footer for any allocations at the end of the space, so we keep track of how much
  // free space there is at the end manually.
  std::unique_ptr<MemMap> mem_map_;
  // Side table for allocation info, one per page.
  std::unique_ptr<MemMap> allocation_info_map_;
  AllocationInfo* allocation_info_;

  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Free bytes at the end of the space.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};
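
// Illustrative creation sketch (parameters are assumptions, not prescriptions:
// a null requested_begin lets the kernel pick the mapping address, and
// MB == 1024 * KB comes from ART's globals header):
//
//   FreeListSpace* los = FreeListSpace::Create("free list large object space",
//                                              /*requested_begin=*/nullptr,
//                                              /*capacity=*/512 * MB);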

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_