/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_SPACE_H_

#include <memory>
#include <string>

#include "base/atomic.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/collector/object_byte_pair.h"
#include "runtime_globals.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

class Heap;

namespace space {

class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class RegionSpace;
class ZygoteSpace;

static constexpr bool kDebugSpaces = kIsDebugBuild;

// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan these areas (such as the Zygote space).
  kGcRetentionPolicyFullCollect,
};
std::ostream& operator<<(std::ostream& os, GcRetentionPolicy policy);

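// Illustrative sketch (not part of this header's API): a partial collection would consult the
// retention policy to decide which spaces to process. Heap::GetContinuousSpaces() is assumed
// here purely for the example.
//
//   for (space::ContinuousSpace* s : heap->GetContinuousSpaces()) {
//     if (partial && s->GetGcRetentionPolicy() != kGcRetentionPolicyAlwaysCollect) {
//       continue;  // e.g. Zygote and image spaces wait for a full collection.
//     }
//     // ... scan and collect s ...
//   }
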
enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeMallocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeBumpPointerSpace,
  kSpaceTypeLargeObjectSpace,
  kSpaceTypeRegionSpace,
};
std::ostream& operator<<(std::ostream& os, SpaceType space_type);

// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy for when objects associated with this space are collected.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, malloc, zygote, bump pointer, large object, or region.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, i.e. one backed by a memory-mapped image file?
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();

  // Is this a malloc-backed allocation space (dlmalloc or rosalloc)?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  bool IsRegionSpace() const {
    return GetType() == kSpaceTypeRegionSpace;
  }
  virtual RegionSpace* AsRegionSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
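
// Illustrative use of the Is*/As* pattern above (hypothetical call site, not part of this
// header):
//
//   void VisitSpace(Space* space) {
//     if (space->IsContinuousSpace()) {
//       ContinuousSpace* cs = space->AsContinuousSpace();
//       // ... use cs->Begin() and cs->End() ...
//     } else if (space->IsLargeObjectSpace()) {
//       LargeObjectSpace* los = space->AsLargeObjectSpace();
//       // ... sweep los ...
//     }
//   }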

// AllocSpace interface.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation succeeds, the output parameter
  // bytes_allocated will be set to the actually allocated bytes, which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  //
  // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread-local
  // allocation, if applicable. It is
  // 1) equal to bytes_allocated if it's not a thread-local allocation,
  // 2) greater than bytes_allocated if it's a thread-local allocation that required a new
  //    buffer, or
  // 3) zero if it's a thread-local allocation in an existing buffer.
  // This is what is to be added to Heap::num_bytes_allocated_.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
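
  // Worked example of the accounting above (sizes are illustrative): an 8-byte request served
  // from a freshly acquired 16 KiB thread-local buffer reports bytes_allocated = 8 and
  // bytes_tl_bulk_allocated = 16384 (case 2); the next 8-byte request served from the same
  // buffer reports bytes_tl_bulk_allocated = 0 (case 3).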

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated)
      REQUIRES(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Free (deallocate) all objects in a list, and return the number of bytes freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeAllThreadLocalBuffers() = 0;
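
  // Worked example of the accounting above (sizes are illustrative): revoking a thread-local
  // run with 6 KiB of unused space returns 6144, which the caller subtracts from
  // Heap::num_bytes_allocated_.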

  // Compute the largest free contiguous chunk of memory available in the space; if it is smaller
  // than failed_alloc_bytes, log it to os and return true. Otherwise leave os untouched and
  // return false.
  virtual bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;

 protected:
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;
    space::Space* const space;
    Thread* const self;
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};

// Continuous spaces have bitmaps, and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  uint8_t* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_.load(std::memory_order_relaxed);
  }

  // The end of the address range covered by the space.
  uint8_t* Limit() const {
    return limit_;
  }

  // Change the end of the space. Be careful with use since changing the end of a space to an
  // invalid value may break the GC.
  void SetEnd(uint8_t* end) {
    end_.store(end, std::memory_order_relaxed);
  }

  void SetLimit(uint8_t* limit) {
    limit_ = limit;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() = 0;

  // Maximum size to which the mapped space can grow.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }

  // Is the given object within this space? The upper bound is checked first since continuous
  // spaces are iterated over from low to high, so a pointer that misses is most often above
  // the space.
  bool HasAddress(const mirror::Object* obj) const {
    const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
    return byte_ptr < Limit() && byte_ptr >= Begin();
  }

  bool Contains(const mirror::Object* obj) const override {
    return HasAddress(obj);
  }
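
  // Worked example (addresses are hypothetical): with Begin() = 0x12c00000,
  // End() = 0x12c40000 and Limit() = 0x22c00000, Size() is 256 KiB of space currently in use,
  // Capacity() is 256 MiB of reserved address range, and HasAddress() answers true for any
  // pointer in [Begin(), Limit()), even beyond End().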

  bool IsContinuousSpace() const override {
    return true;
  }

  bool HasBoundBitmaps() REQUIRES(Locks::heap_bitmap_lock_);

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  uint8_t* begin, uint8_t* end, uint8_t* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  uint8_t* begin_;

  // Current end of the space.
  Atomic<uint8_t*> end_;

  // Limit of the space.
  uint8_t* limit_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace);
};

// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  accounting::LargeObjectBitmap* GetLiveBitmap() {
    return &live_bitmap_;
  }

  accounting::LargeObjectBitmap* GetMarkBitmap() {
    return &mark_bitmap_;
  }

  bool IsDiscontinuousSpace() const override {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  accounting::LargeObjectBitmap live_bitmap_;
  accounting::LargeObjectBitmap mark_bitmap_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};

class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }
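
  // Illustrative example (sizes are hypothetical): a malloc space created with a 64 MiB growth
  // limit inside a 512 MiB reservation reports Capacity() = 64 MiB but
  // NonGrowthLimitCapacity() = 512 MiB; the override lives in the allocation space subclasses.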

  MemMap* GetMemMap() {
    return &mem_map_;
  }

  const MemMap* GetMemMap() const {
    return &mem_map_;
  }

  MemMap ReleaseMemMap() {
    return std::move(mem_map_);
  }

 protected:
  MemMapSpace(const std::string& name,
              MemMap&& mem_map,
              uint8_t* begin,
              uint8_t* end,
              uint8_t* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(std::move(mem_map)) {
  }

  // Underlying storage of the space.
  MemMap mem_map_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};

// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const override {
    return true;
  }
  AllocSpace* AsAllocSpace() override {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const override {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() override {
    return this;
  }

  // Make the mark bitmap an alias of the live bitmap. Save the current mark bitmap into
  // `temp_bitmap_`, so that we can restore it later in ContinuousMemMapAllocSpace::UnBindBitmaps.
  void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
  // Unalias the mark bitmap from the live bitmap and restore the old mark bitmap.
  void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
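
  // Sketch of the intended usage (the actual call sites live in the collectors): a space that is
  // not collected in the current cycle gets BindLiveToMarkBitmap() so its objects all appear
  // marked, with UnBindBitmaps() restoring the saved bitmap afterwards; a space that is collected
  // gets SwapBitmaps() after marking so the mark bitmap becomes the new live bitmap while the old
  // live set is swept concurrently.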

  // Clear the space back to an empty space.
  virtual void Clear() = 0;

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
    return &live_bitmap_;
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
    return &mark_bitmap_;
  }

  accounting::ContinuousSpaceBitmap* GetTempBitmap() {
    return &temp_bitmap_;
  }

  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  accounting::ContinuousSpaceBitmap live_bitmap_;
  accounting::ContinuousSpaceBitmap mark_bitmap_;
  accounting::ContinuousSpaceBitmap temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name,
                             MemMap&& mem_map,
                             uint8_t* begin,
                             uint8_t* end,
                             uint8_t* limit,
                             GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_H_