1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_HEAP_H_
18 #define ART_RUNTIME_GC_HEAP_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <unordered_set>
23 #include <vector>
24 
25 #include "allocator_type.h"
26 #include "arch/instruction_set.h"
27 #include "atomic.h"
28 #include "base/mutex.h"
29 #include "base/time_utils.h"
30 #include "gc/gc_cause.h"
31 #include "gc/collector/gc_type.h"
32 #include "gc/collector/iteration.h"
33 #include "gc/collector_type.h"
34 #include "gc/space/large_object_space.h"
35 #include "globals.h"
36 #include "handle.h"
37 #include "obj_ptr.h"
38 #include "offsets.h"
39 #include "process_state.h"
40 #include "safe_map.h"
41 #include "verify_object.h"
42 
43 namespace art {
44 
45 class ConditionVariable;
46 class IsMarkedVisitor;
47 class Mutex;
48 class RootVisitor;
49 class StackVisitor;
50 class Thread;
51 class ThreadPool;
52 class TimingLogger;
53 class VariableSizedHandleScope;
54 
55 namespace mirror {
56   class Class;
57   class Object;
58 }  // namespace mirror
59 
60 namespace gc {
61 
62 class AllocationListener;
63 class AllocRecordObjectMap;
64 class GcPauseListener;
65 class ReferenceProcessor;
66 class TaskProcessor;
67 class Verification;
68 
69 namespace accounting {
70   template <typename T> class AtomicStack;
71   typedef AtomicStack<mirror::Object> ObjectStack;
72   class CardTable;
73   class HeapBitmap;
74   class ModUnionTable;
75   class ReadBarrierTable;
76   class RememberedSet;
77 }  // namespace accounting
78 
79 namespace collector {
80   class ConcurrentCopying;
81   class GarbageCollector;
82   class MarkCompact;
83   class MarkSweep;
84   class SemiSpace;
85 }  // namespace collector
86 
87 namespace allocator {
88   class RosAlloc;
89 }  // namespace allocator
90 
91 namespace space {
92   class AllocSpace;
93   class BumpPointerSpace;
94   class ContinuousMemMapAllocSpace;
95   class DiscontinuousSpace;
96   class DlMallocSpace;
97   class ImageSpace;
98   class LargeObjectSpace;
99   class MallocSpace;
100   class RegionSpace;
101   class RosAllocSpace;
102   class Space;
103   class ZygoteSpace;
104 }  // namespace space
105 
106 enum HomogeneousSpaceCompactResult {
107   // Success.
108   kSuccess,
109   // Reject due to disabled moving GC.
110   kErrorReject,
111   // Unsupported due to the current configuration.
112   kErrorUnsupported,
113   // System is shutting down.
114   kErrorVMShuttingDown,
115 };
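
// Illustrative handling of the result (a sketch only; PerformHomogeneousSpaceCompact is declared
// on Heap below, the surrounding error handling is an assumption):
//
//   gc::Heap* heap = Runtime::Current()->GetHeap();
//   switch (heap->PerformHomogeneousSpaceCompact()) {
//     case gc::HomogeneousSpaceCompactResult::kSuccess:
//       break;  // Compaction ran; fragmented space was reclaimed.
//     case gc::HomogeneousSpaceCompactResult::kErrorReject:
//     case gc::HomogeneousSpaceCompactResult::kErrorUnsupported:
//     case gc::HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
//       break;  // Fall back, e.g. by throwing OOME or skipping the compaction.
//   }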
116 
117 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
118 static constexpr bool kUseRosAlloc = true;
119 
120 // If true, use thread-local allocation stack.
121 static constexpr bool kUseThreadLocalAllocationStack = true;
122 
123 class Heap {
124  public:
125   // Default constants used to tune heap sizing and GC behavior.
126   static constexpr size_t kDefaultStartingSize = kPageSize;
127   static constexpr size_t kDefaultInitialSize = 2 * MB;
128   static constexpr size_t kDefaultMaximumSize = 256 * MB;
129   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
130   static constexpr size_t kDefaultMaxFree = 2 * MB;
131   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
132   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
133   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
134   static constexpr size_t kDefaultTLABSize = 32 * KB;
135   static constexpr double kDefaultTargetUtilization = 0.5;
136   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
137   // Primitive arrays larger than this size are put in the large object space.
138   static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
139   static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
140   // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
141   static constexpr bool kDefaultEnableParallelGC = false;
142 
143   // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
144   // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
145   static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
146       USE_ART_LOW_4G_ALLOCATOR ?
147           space::LargeObjectSpaceType::kFreeList
148         : space::LargeObjectSpaceType::kMap;
149 
150   // Used so that we don't overflow the allocation time atomic integer.
151   static constexpr size_t kTimeAdjust = 1024;
152 
153   // How often we allow heap trimming to happen (nanoseconds).
154   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
155   // How long we wait after a transition request to perform a collector transition (nanoseconds).
156   static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
157 
158   // Create a heap with the requested sizes. The possibly empty
159   // original_image_file_name specifies Spaces to load based on
160   // ImageWriter output.
161   Heap(size_t initial_size,
162        size_t growth_limit,
163        size_t min_free,
164        size_t max_free,
165        double target_utilization,
166        double foreground_heap_growth_multiplier,
167        size_t capacity,
168        size_t non_moving_space_capacity,
169        const std::string& original_image_file_name,
170        InstructionSet image_instruction_set,
171        CollectorType foreground_collector_type,
172        CollectorType background_collector_type,
173        space::LargeObjectSpaceType large_object_space_type,
174        size_t large_object_threshold,
175        size_t parallel_gc_threads,
176        size_t conc_gc_threads,
177        bool low_memory_mode,
178        size_t long_pause_threshold,
179        size_t long_gc_threshold,
180        bool ignore_max_footprint,
181        bool use_tlab,
182        bool verify_pre_gc_heap,
183        bool verify_pre_sweeping_heap,
184        bool verify_post_gc_heap,
185        bool verify_pre_gc_rosalloc,
186        bool verify_pre_sweeping_rosalloc,
187        bool verify_post_gc_rosalloc,
188        bool gc_stress_mode,
189        bool measure_gc_performance,
190        bool use_homogeneous_space_compaction,
191        uint64_t min_interval_homogeneous_space_compaction_by_oom);
192 
193   ~Heap();
194 
195   // Allocates and initializes storage for an object instance.
196   template <bool kInstrumented, typename PreFenceVisitor>
197   mirror::Object* AllocObject(Thread* self,
198                               ObjPtr<mirror::Class> klass,
199                               size_t num_bytes,
200                               const PreFenceVisitor& pre_fence_visitor)
201       REQUIRES_SHARED(Locks::mutator_lock_)
202       REQUIRES(!*gc_complete_lock_,
203                !*pending_task_lock_,
204                !*backtrace_lock_,
205                !Roles::uninterruptible_) {
206     return AllocObjectWithAllocator<kInstrumented, true>(self,
207                                                          klass,
208                                                          num_bytes,
209                                                          GetCurrentAllocator(),
210                                                          pre_fence_visitor);
211   }
212 
213   template <bool kInstrumented, typename PreFenceVisitor>
214   mirror::Object* AllocNonMovableObject(Thread* self,
215                                         ObjPtr<mirror::Class> klass,
216                                         size_t num_bytes,
217                                         const PreFenceVisitor& pre_fence_visitor)
218       REQUIRES_SHARED(Locks::mutator_lock_)
219       REQUIRES(!*gc_complete_lock_,
220                !*pending_task_lock_,
221                !*backtrace_lock_,
222                !Roles::uninterruptible_) {
223     return AllocObjectWithAllocator<kInstrumented, true>(self,
224                                                          klass,
225                                                          num_bytes,
226                                                          GetCurrentNonMovingAllocator(),
227                                                          pre_fence_visitor);
228   }
229 
230   template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
231   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
232                                                          ObjPtr<mirror::Class> klass,
233                                                          size_t byte_count,
234                                                          AllocatorType allocator,
235                                                          const PreFenceVisitor& pre_fence_visitor)
236       REQUIRES_SHARED(Locks::mutator_lock_)
237       REQUIRES(!*gc_complete_lock_,
238                !*pending_task_lock_,
239                !*backtrace_lock_,
240                !Roles::uninterruptible_);
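
  // Illustrative call into the allocation entry points (a sketch, not code taken from the
  // runtime; the ScopedObjectAccess scope, the resolved klass, and the no-op visitor are
  // assumptions made for the example):
  //
  //   ScopedObjectAccess soa(Thread::Current());
  //   gc::Heap* heap = Runtime::Current()->GetHeap();
  //   auto no_op_visitor = [](ObjPtr<mirror::Object>, size_t) {};
  //   mirror::Object* obj = heap->AllocObject</*kInstrumented=*/true>(
  //       soa.Self(), klass, klass->GetObjectSize(), no_op_visitor);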
241 
242   AllocatorType GetCurrentAllocator() const {
243     return current_allocator_;
244   }
245 
246   AllocatorType GetCurrentNonMovingAllocator() const {
247     return current_non_moving_allocator_;
248   }
249 
250   // Visit all of the live objects in the heap.
251   template <typename Visitor>
252   ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
253       REQUIRES_SHARED(Locks::mutator_lock_)
254       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
255   template <typename Visitor>
256   ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
257       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
258 
259   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
260       REQUIRES_SHARED(Locks::mutator_lock_);
261 
262   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
263       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
264   void RegisterNativeFree(JNIEnv* env, size_t bytes);
265 
266   // Change the allocator, updates entrypoints.
267   void ChangeAllocator(AllocatorType allocator)
268       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
269 
270   // Transition the garbage collector during runtime, may copy objects from one space to another.
271   void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);
272 
273   // Change the collector to be one of the possible options (MS, CMS, SS).
274   void ChangeCollector(CollectorType collector_type)
275       REQUIRES(Locks::mutator_lock_);
276 
277   // The given reference is believed to point to an object in the Java heap; check its soundness.
278   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
279   // proper lock ordering for it.
280   void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
281 
282   // Check sanity of all live references.
283   void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
284   // Returns how many failures occurred.
285   size_t VerifyHeapReferences(bool verify_referents = true)
286       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
287   bool VerifyMissingCardMarks()
288       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
289 
290   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
291   // and doesn't abort on error, allowing the caller to report more
292   // meaningful diagnostics.
293   bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
294 
295   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
296   // very slow.
297   bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
298       REQUIRES_SHARED(Locks::mutator_lock_);
299 
300   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
301   // Requires the heap lock to be held.
302   bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
303                           bool search_allocation_stack = true,
304                           bool search_live_stack = true,
305                           bool sorted = false)
306       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
307 
308   // Returns true if there is any chance that the object (obj) will move.
309   bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
310 
311   // Temporarily disable moving (compacting) GC until the matching decrement call is made.
312   void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
313   void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
314 
315   // Temporarily disable thread flip for JNI critical calls.
316   void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
317   void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
318   void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
319   void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
320 
321   // Clear all of the mark bits; doesn't clear bitmaps whose live bits are shared with their mark bits.
322   // Mutator lock is required for GetContinuousSpaces.
323   void ClearMarkedObjects()
324       REQUIRES(Locks::heap_bitmap_lock_)
325       REQUIRES_SHARED(Locks::mutator_lock_);
326 
327   // Initiates an explicit garbage collection.
328   void CollectGarbage(bool clear_soft_references)
329       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
330 
331   // Does a concurrent GC, should only be called by the GC daemon thread
332   // through runtime.
333   void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
334       REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);
335 
336   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
337   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
338   void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
339                       bool use_is_assignable_from,
340                       uint64_t* counts)
341       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
342       REQUIRES_SHARED(Locks::mutator_lock_);
343 
344   // Implements JDWP RT_Instances.
345   void GetInstances(VariableSizedHandleScope& scope,
346                     Handle<mirror::Class> c,
347                     int32_t max_count,
348                     std::vector<Handle<mirror::Object>>& instances)
349       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
350       REQUIRES_SHARED(Locks::mutator_lock_);
351 
352   // Implements JDWP OR_ReferringObjects.
353   void GetReferringObjects(VariableSizedHandleScope& scope,
354                            Handle<mirror::Object> o,
355                            int32_t max_count,
356                            std::vector<Handle<mirror::Object>>& referring_objects)
357       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
358       REQUIRES_SHARED(Locks::mutator_lock_);
359 
360   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
361   // implement dalvik.system.VMRuntime.clearGrowthLimit.
362   void ClearGrowthLimit();
363 
364   // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
365   // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
366   void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);
367 
368   // Target ideal heap utilization ratio, implements
369   // dalvik.system.VMRuntime.getTargetHeapUtilization.
370   double GetTargetHeapUtilization() const {
371     return target_utilization_;
372   }
373 
374   // Data structure memory usage tracking.
375   void RegisterGCAllocation(size_t bytes);
376   void RegisterGCDeAllocation(size_t bytes);
377 
378   // Set the heap's private space pointers to be the same as the space based on its type. Public
379   // due to usage by tests.
380   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
381       REQUIRES(!Locks::heap_bitmap_lock_);
382   void AddSpace(space::Space* space)
383       REQUIRES(!Locks::heap_bitmap_lock_)
384       REQUIRES(Locks::mutator_lock_);
385   void RemoveSpace(space::Space* space)
386     REQUIRES(!Locks::heap_bitmap_lock_)
387     REQUIRES(Locks::mutator_lock_);
388 
389   // Set target ideal heap utilization ratio, implements
390   // dalvik.system.VMRuntime.setTargetHeapUtilization.
391   void SetTargetHeapUtilization(float target);
392 
393   // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
394   // from the system. Doesn't allow the space to exceed its growth limit.
395   void SetIdealFootprint(size_t max_allowed_footprint);
396 
397   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
398   // waited for.
399   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
400 
401   // Update the heap's process state to a new value, may cause compaction to occur.
402   void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
403       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
404 
405   bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
406     // No lock needed: checking whether the vector is empty is thread safe.
407     return !continuous_spaces_.empty();
408   }
409 
410   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
411       REQUIRES_SHARED(Locks::mutator_lock_) {
412     return continuous_spaces_;
413   }
414 
415   const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
416     return discontinuous_spaces_;
417   }
418 
419   const collector::Iteration* GetCurrentGcIteration() const {
420     return &current_gc_iteration_;
421   }
422   collector::Iteration* GetCurrentGcIteration() {
423     return &current_gc_iteration_;
424   }
425 
426   // Enable verification of object references when the runtime is sufficiently initialized.
427   void EnableObjectValidation() {
428     verify_object_mode_ = kVerifyObjectSupport;
429     if (verify_object_mode_ > kVerifyObjectModeDisabled) {
430       VerifyHeap();
431     }
432   }
433 
434   // Disable object reference verification for image writing.
435   void DisableObjectValidation() {
436     verify_object_mode_ = kVerifyObjectModeDisabled;
437   }
438 
439   // Other checks may be performed if we know the heap should be in a sane state.
440   bool IsObjectValidationEnabled() const {
441     return verify_object_mode_ > kVerifyObjectModeDisabled;
442   }
443 
444   // Returns true if low memory mode is enabled.
445   bool IsLowMemoryMode() const {
446     return low_memory_mode_;
447   }
448 
449   // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
450   // Scales heap growth, min free, and max free.
451   double HeapGrowthMultiplier() const;
452 
453   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
454   // free-list backed space.
455   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
456 
457   // Record the bytes freed by thread-local buffer revoke.
458   void RecordFreeRevoke();
459 
460   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
461   // The call is not needed if null is stored in the field.
462   ALWAYS_INLINE void WriteBarrierField(ObjPtr<mirror::Object> dst,
463                                        MemberOffset offset,
464                                        ObjPtr<mirror::Object> new_value)
465       REQUIRES_SHARED(Locks::mutator_lock_);
466 
467   // Write barrier for array operations that update many field positions
468   ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
469                                        int start_offset,
470                                        // TODO: element_count or byte_count?
471                                        size_t length)
472       REQUIRES_SHARED(Locks::mutator_lock_);
473 
474   ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
475       REQUIRES_SHARED(Locks::mutator_lock_);
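
  // Illustrative use of the field write barrier (a sketch; the store is shown with a made-up
  // helper, only the WriteBarrierField call comes from this class): after writing a non-null
  // reference into a field of dst, dirty the corresponding card so the next GC rescans dst.
  //
  //   StoreReferenceIntoField(dst, offset, new_value);  // hypothetical store
  //   Runtime::Current()->GetHeap()->WriteBarrierField(dst, offset, new_value);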
476 
477   accounting::CardTable* GetCardTable() const {
478     return card_table_.get();
479   }
480 
481   accounting::ReadBarrierTable* GetReadBarrierTable() const {
482     return rb_table_.get();
483   }
484 
485   void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
486 
487   // Returns the number of bytes currently allocated.
488   size_t GetBytesAllocated() const {
489     return num_bytes_allocated_.LoadSequentiallyConsistent();
490   }
491 
492   // Returns the number of objects currently allocated.
493   size_t GetObjectsAllocated() const
494       REQUIRES(!Locks::heap_bitmap_lock_);
495 
496   // Returns the total number of objects allocated since the heap was created.
497   uint64_t GetObjectsAllocatedEver() const;
498 
499   // Returns the total number of bytes allocated since the heap was created.
500   uint64_t GetBytesAllocatedEver() const;
501 
502   // Returns the total number of objects freed since the heap was created.
503   uint64_t GetObjectsFreedEver() const {
504     return total_objects_freed_ever_;
505   }
506 
507   // Returns the total number of bytes freed since the heap was created.
508   uint64_t GetBytesFreedEver() const {
509     return total_bytes_freed_ever_;
510   }
511 
512   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
513   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
514   // were specified. Android apps start with a growth limit (small heap size) which is
515   // cleared/extended for large apps.
516   size_t GetMaxMemory() const {
517     // There are some race conditions in the allocation code that can cause bytes allocated to
518     // become larger than growth_limit_ in rare cases.
519     return std::max(GetBytesAllocated(), growth_limit_);
520   }
521 
522   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
523   // consumed by an application.
524   size_t GetTotalMemory() const;
525 
526   // Returns approximately how much free memory we have until the next GC happens.
527   size_t GetFreeMemoryUntilGC() const {
528     return max_allowed_footprint_ - GetBytesAllocated();
529   }
530 
531   // Returns approximately how much free memory we have until the next OOME happens.
532   size_t GetFreeMemoryUntilOOME() const {
533     return growth_limit_ - GetBytesAllocated();
534   }
535 
536   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
537   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
538   size_t GetFreeMemory() const {
539     size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
540     size_t total_memory = GetTotalMemory();
541     // Make sure we don't get a negative number.
542     return total_memory - std::min(total_memory, byte_allocated);
543   }
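
  // Worked example of how the three queries relate (the numbers are made up): with
  // growth_limit_ == 256 MB, GetTotalMemory() == 64 MB and 24 MB currently allocated,
  // java.lang.Runtime reports maxMemory() == 256 MB, totalMemory() == 64 MB and
  // freeMemory() == 64 MB - 24 MB == 40 MB.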
544 
545   // Get the space that corresponds to an object's address. The current implementation searches
546   // all spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
547   // TODO: consider using a faster data structure such as a binary tree.
548   space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
549       REQUIRES_SHARED(Locks::mutator_lock_);
550 
551   space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
552       REQUIRES_SHARED(Locks::mutator_lock_);
553 
554   space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
555                                                               bool fail_ok) const
556       REQUIRES_SHARED(Locks::mutator_lock_);
557 
558   space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
559       REQUIRES_SHARED(Locks::mutator_lock_);
560 
561   space::Space* FindSpaceFromAddress(const void* ptr) const
562       REQUIRES_SHARED(Locks::mutator_lock_);
563 
564   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
565 
566   // Do a pending collector transition.
567   void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
568 
569   // Deflate monitors, ... and trim the spaces.
570   void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
571 
572   void RevokeThreadLocalBuffers(Thread* thread);
573   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
574   void RevokeAllThreadLocalBuffers();
575   void AssertThreadLocalBuffersAreRevoked(Thread* thread);
576   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
577   void RosAllocVerification(TimingLogger* timings, const char* name)
578       REQUIRES(Locks::mutator_lock_);
579 
580   accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
581     return live_bitmap_.get();
582   }
583 
584   accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
585     return mark_bitmap_.get();
586   }
587 
588   accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
589     return live_stack_.get();
590   }
591 
592   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
593 
594   // Mark and empty stack.
595   void FlushAllocStack()
596       REQUIRES_SHARED(Locks::mutator_lock_)
597       REQUIRES(Locks::heap_bitmap_lock_);
598 
599   // Revoke all the thread-local allocation stacks.
600   void RevokeAllThreadLocalAllocationStacks(Thread* self)
601       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
602 
603   // Mark all the objects in the allocation stack in the specified bitmap.
604   // TODO: Refactor?
605   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
606                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
607                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
608                       accounting::ObjectStack* stack)
609       REQUIRES_SHARED(Locks::mutator_lock_)
610       REQUIRES(Locks::heap_bitmap_lock_);
611 
612   // Mark the specified allocation stack as live.
613   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
614       REQUIRES_SHARED(Locks::mutator_lock_)
615       REQUIRES(Locks::heap_bitmap_lock_);
616 
617   // Unbind any bound bitmaps.
618   void UnBindBitmaps()
619       REQUIRES(Locks::heap_bitmap_lock_)
620       REQUIRES_SHARED(Locks::mutator_lock_);
621 
622   // Returns the boot image spaces. There may be multiple boot image spaces.
623   const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
624     return boot_image_spaces_;
625   }
626 
627   bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
628       REQUIRES_SHARED(Locks::mutator_lock_);
629 
630   bool IsInBootImageOatFile(const void* p) const
631       REQUIRES_SHARED(Locks::mutator_lock_);
632 
633   void GetBootImagesSize(uint32_t* boot_image_begin,
634                          uint32_t* boot_image_end,
635                          uint32_t* boot_oat_begin,
636                          uint32_t* boot_oat_end);
637 
638   // Permanently disable moving garbage collection.
639   void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
640 
641   space::DlMallocSpace* GetDlMallocSpace() const {
642     return dlmalloc_space_;
643   }
644 
645   space::RosAllocSpace* GetRosAllocSpace() const {
646     return rosalloc_space_;
647   }
648 
649   // Return the corresponding rosalloc space.
650   space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
651       REQUIRES_SHARED(Locks::mutator_lock_);
652 
653   space::MallocSpace* GetNonMovingSpace() const {
654     return non_moving_space_;
655   }
656 
657   space::LargeObjectSpace* GetLargeObjectsSpace() const {
658     return large_object_space_;
659   }
660 
661   // Returns the free list space that may contain movable objects (the
662   // one that's not the non-moving space), either rosalloc_space_ or
663   // dlmalloc_space_.
664   space::MallocSpace* GetPrimaryFreeListSpace() {
665     if (kUseRosAlloc) {
666       DCHECK(rosalloc_space_ != nullptr);
667       // reinterpret_cast is necessary as the space class hierarchy
668       // isn't known (#included) yet here.
669       return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
670     } else {
671       DCHECK(dlmalloc_space_ != nullptr);
672       return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
673     }
674   }
675 
676   void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
677   std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
678 
679   // GC performance measuring
680   void DumpGcPerformanceInfo(std::ostream& os)
681       REQUIRES(!*gc_complete_lock_);
682   void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
683 
684   // Thread pool.
685   void CreateThreadPool();
686   void DeleteThreadPool();
687   ThreadPool* GetThreadPool() {
688     return thread_pool_.get();
689   }
690   size_t GetParallelGCThreadCount() const {
691     return parallel_gc_threads_;
692   }
693   size_t GetConcGCThreadCount() const {
694     return conc_gc_threads_;
695   }
696   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
697   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
698 
699   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
700   void AddRememberedSet(accounting::RememberedSet* remembered_set);
701   // Also deletes the remembered set.
702   void RemoveRememberedSet(space::Space* space);
703 
704   bool IsCompilingBoot() const;
705   bool HasBootImageSpace() const {
706     return !boot_image_spaces_.empty();
707   }
708 
709   ReferenceProcessor* GetReferenceProcessor() {
710     return reference_processor_.get();
711   }
712   TaskProcessor* GetTaskProcessor() {
713     return task_processor_.get();
714   }
715 
716   bool HasZygoteSpace() const {
717     return zygote_space_ != nullptr;
718   }
719 
720   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
721     return concurrent_copying_collector_;
722   }
723 
724   CollectorType CurrentCollectorType() {
725     return collector_type_;
726   }
727 
728   bool IsGcConcurrentAndMoving() const {
729     if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
730       // Assume no transition when a concurrent moving collector is used.
731       DCHECK_EQ(collector_type_, foreground_collector_type_);
732       return true;
733     }
734     return false;
735   }
736 
737   bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
738     MutexLock mu(self, *gc_complete_lock_);
739     return disable_moving_gc_count_ > 0;
740   }
741 
742   // Request an asynchronous trim.
743   void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
744 
745   // Request asynchronous GC.
746   void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
747       REQUIRES(!*pending_task_lock_);
748 
749   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
750   bool MayUseCollector(CollectorType type) const;
751 
752   // Used by tests to reduce timing-dependent flakiness in OOME behavior.
753   void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
754     min_interval_homogeneous_space_compaction_by_oom_ = interval;
755   }
756 
757   // Helpers for android.os.Debug.getRuntimeStat().
758   uint64_t GetGcCount() const;
759   uint64_t GetGcTime() const;
760   uint64_t GetBlockingGcCount() const;
761   uint64_t GetBlockingGcTime() const;
762   void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
763   void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
764 
765   // Allocation tracking support
766   // Callers to this function use double-checked locking to ensure safety on allocation_records_
767   bool IsAllocTrackingEnabled() const {
768     return alloc_tracking_enabled_.LoadRelaxed();
769   }
770 
771   void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
772     alloc_tracking_enabled_.StoreRelaxed(enabled);
773   }
774 
775   AllocRecordObjectMap* GetAllocationRecords() const
776       REQUIRES(Locks::alloc_tracker_lock_) {
777     return allocation_records_.get();
778   }
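
  // Sketch of the double-checked pattern mentioned above (the Heap and Locks names are real,
  // the surrounding code is an assumption):
  //
  //   if (heap->IsAllocTrackingEnabled()) {
  //     MutexLock mu(self, *Locks::alloc_tracker_lock_);
  //     if (heap->IsAllocTrackingEnabled()) {
  //       AllocRecordObjectMap* records = heap->GetAllocationRecords();
  //       // ... use records while alloc_tracker_lock_ is held ...
  //     }
  //   }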
779 
780   void SetAllocationRecords(AllocRecordObjectMap* records)
781       REQUIRES(Locks::alloc_tracker_lock_);
782 
783   void VisitAllocationRecords(RootVisitor* visitor) const
784       REQUIRES_SHARED(Locks::mutator_lock_)
785       REQUIRES(!Locks::alloc_tracker_lock_);
786 
787   void SweepAllocationRecords(IsMarkedVisitor* visitor) const
788       REQUIRES_SHARED(Locks::mutator_lock_)
789       REQUIRES(!Locks::alloc_tracker_lock_);
790 
791   void DisallowNewAllocationRecords() const
792       REQUIRES_SHARED(Locks::mutator_lock_)
793       REQUIRES(!Locks::alloc_tracker_lock_);
794 
795   void AllowNewAllocationRecords() const
796       REQUIRES_SHARED(Locks::mutator_lock_)
797       REQUIRES(!Locks::alloc_tracker_lock_);
798 
799   void BroadcastForNewAllocationRecords() const
800       REQUIRES(!Locks::alloc_tracker_lock_);
801 
802   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
803 
804   // Create a new alloc space and compact default alloc space to it.
805   HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
806   bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
807 
808   // Install an allocation listener.
809   void SetAllocationListener(AllocationListener* l);
810   // Remove an allocation listener. Note: the listener must not be deleted, as for performance
811   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
812   void RemoveAllocationListener();
813 
814   // Install a gc pause listener.
815   void SetGcPauseListener(GcPauseListener* l);
816   // Get the currently installed gc pause listener, or null.
817   GcPauseListener* GetGcPauseListener() {
818     return gc_pause_listener_.LoadAcquire();
819   }
820   // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
821   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
822   void RemoveGcPauseListener();
823 
824   const Verification* GetVerification() const;
825 
826  private:
827   class ConcurrentGCTask;
828   class CollectorTransitionTask;
829   class HeapTrimTask;
830 
831   // Compact source space to target space. Returns the collector used.
832   collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
833                                        space::ContinuousMemMapAllocSpace* source_space,
834                                        GcCause gc_cause)
835       REQUIRES(Locks::mutator_lock_);
836 
837   void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
838   void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
839       REQUIRES(!*gc_complete_lock_);
840   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
841 
842   // Create a mem map with a preferred base address.
843   static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
844                                               size_t capacity, std::string* out_error_str);
845 
846   bool SupportHSpaceCompaction() const {
847     // Returns true if we can do hspace compaction
848     return main_space_backup_ != nullptr;
849   }
850 
851   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
852     return
853         allocator_type != kAllocatorTypeBumpPointer &&
854         allocator_type != kAllocatorTypeTLAB &&
855         allocator_type != kAllocatorTypeRegion &&
856         allocator_type != kAllocatorTypeRegionTLAB;
857   }
858   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
859     if (kUseReadBarrier) {
860       // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
861       return true;
862     }
863     return
864         allocator_type != kAllocatorTypeBumpPointer &&
865         allocator_type != kAllocatorTypeTLAB;
866   }
867   static bool IsMovingGc(CollectorType collector_type) {
868     return
869         collector_type == kCollectorTypeSS ||
870         collector_type == kCollectorTypeGSS ||
871         collector_type == kCollectorTypeCC ||
872         collector_type == kCollectorTypeCCBackground ||
873         collector_type == kCollectorTypeMC ||
874         collector_type == kCollectorTypeHomogeneousSpaceCompact;
875   }
876   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
877       REQUIRES_SHARED(Locks::mutator_lock_);
878   ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
879                                        size_t new_num_bytes_allocated,
880                                        ObjPtr<mirror::Object>* obj)
881       REQUIRES_SHARED(Locks::mutator_lock_)
882       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
883 
884   accounting::ObjectStack* GetMarkStack() {
885     return mark_stack_.get();
886   }
887 
888   // We don't force this to be inlined since it is a slow path.
889   template <bool kInstrumented, typename PreFenceVisitor>
890   mirror::Object* AllocLargeObject(Thread* self,
891                                    ObjPtr<mirror::Class>* klass,
892                                    size_t byte_count,
893                                    const PreFenceVisitor& pre_fence_visitor)
894       REQUIRES_SHARED(Locks::mutator_lock_)
895       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
896 
897   // Handles Allocate()'s slow allocation path with GC involved after
898   // an initial allocation attempt failed.
899   mirror::Object* AllocateInternalWithGc(Thread* self,
900                                          AllocatorType allocator,
901                                          bool instrumented,
902                                          size_t num_bytes,
903                                          size_t* bytes_allocated,
904                                          size_t* usable_size,
905                                          size_t* bytes_tl_bulk_allocated,
906                                          ObjPtr<mirror::Class>* klass)
907       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
908       REQUIRES_SHARED(Locks::mutator_lock_);
909 
910   // Allocate into a specific space.
911   mirror::Object* AllocateInto(Thread* self,
912                                space::AllocSpace* space,
913                                ObjPtr<mirror::Class> c,
914                                size_t bytes)
915       REQUIRES_SHARED(Locks::mutator_lock_);
916 
917   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
918   // wrong space.
919   void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
920 
921   // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
922   // that the switch statement is constant optimized in the entrypoints.
923   template <const bool kInstrumented, const bool kGrow>
924   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
925                                               AllocatorType allocator_type,
926                                               size_t alloc_size,
927                                               size_t* bytes_allocated,
928                                               size_t* usable_size,
929                                               size_t* bytes_tl_bulk_allocated)
930       REQUIRES_SHARED(Locks::mutator_lock_);
931 
932   mirror::Object* AllocWithNewTLAB(Thread* self,
933                                    size_t alloc_size,
934                                    bool grow,
935                                    size_t* bytes_allocated,
936                                    size_t* usable_size,
937                                    size_t* bytes_tl_bulk_allocated)
938       REQUIRES_SHARED(Locks::mutator_lock_);
939 
940   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
941       REQUIRES_SHARED(Locks::mutator_lock_);
942 
943   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
944                                                size_t alloc_size,
945                                                bool grow);
946 
947   // Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
948   void RunFinalization(JNIEnv* env, uint64_t timeout);
949 
950   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
951   // waited for.
952   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
953       REQUIRES(gc_complete_lock_);
954 
955   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
956       REQUIRES(!*pending_task_lock_);
957 
958   void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
959       REQUIRES_SHARED(Locks::mutator_lock_)
960       REQUIRES(!*pending_task_lock_);
961   bool IsGCRequestPending() const;
962 
963   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
964   // which type of Gc was actually run.
965   collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
966                                            GcCause gc_cause,
967                                            bool clear_soft_references)
968       REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
969                !*pending_task_lock_);
970 
971   void PreGcVerification(collector::GarbageCollector* gc)
972       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
973   void PreGcVerificationPaused(collector::GarbageCollector* gc)
974       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
975   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
976       REQUIRES(Locks::mutator_lock_);
977   void PreSweepingGcVerification(collector::GarbageCollector* gc)
978       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
979   void PostGcVerification(collector::GarbageCollector* gc)
980       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
981   void PostGcVerificationPaused(collector::GarbageCollector* gc)
982       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
983 
984   // Find a collector based on GC type.
985   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
986 
987   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
988   void CreateMainMallocSpace(MemMap* mem_map,
989                              size_t initial_size,
990                              size_t growth_limit,
991                              size_t capacity);
992 
993   // Create a malloc space based on a mem map. Does not set the space as default.
994   space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
995                                                   size_t initial_size,
996                                                   size_t growth_limit,
997                                                   size_t capacity,
998                                                   const char* name,
999                                                   bool can_move_objects);
1000 
1001   // Given the current contents of the alloc space, increase the allowed heap footprint to match
1002   // the target utilization ratio.  This should only be called immediately after a full garbage
1003   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
1004   // the GC was run.
1005   void GrowForUtilization(collector::GarbageCollector* collector_ran,
1006                           uint64_t bytes_allocated_before_gc = 0);
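
  // Simplified sketch of the sizing rule (the real code also applies the heap growth multiplier
  // and treats sticky collections differently): the ideal footprint is
  // bytes_allocated / target_utilization_, clamped so the free headroom stays within
  // [min_free_, max_free_]. For example, with 40 MB live and target_utilization_ == 0.5 the
  // ideal footprint is 80 MB, but with max_free_ == 2 MB it is clamped down to
  // 40 MB + 2 MB == 42 MB.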
1007 
1008   size_t GetPercentFree();
1009 
1010   // Swap the allocation stack with the live stack.
1011   void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
1012 
1013   // Clear cards and update the mod union table. When process_alloc_space_cards is true,
1014   // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
1015   // not process the alloc space if process_alloc_space_cards is false.
1016   void ProcessCards(TimingLogger* timings,
1017                     bool use_rem_sets,
1018                     bool process_alloc_space_cards,
1019                     bool clear_alloc_space_cards)
1020       REQUIRES_SHARED(Locks::mutator_lock_);
1021 
1022   // Push an object onto the allocation stack.
1023   void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
1024       REQUIRES_SHARED(Locks::mutator_lock_)
1025       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
1026   void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
1027       REQUIRES_SHARED(Locks::mutator_lock_)
1028       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
1029   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
1030       REQUIRES_SHARED(Locks::mutator_lock_)
1031       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
1032 
1033   void ClearConcurrentGCRequest();
1034   void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
1035   void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
1036 
1037   // Returns whether the current collector type runs concurrently with mutators: true for
1038   // concurrent mark sweep and the concurrent copying collectors, false for other GC types.
1039   bool IsGcConcurrent() const ALWAYS_INLINE {
1040     return collector_type_ == kCollectorTypeCMS ||
1041         collector_type_ == kCollectorTypeCC ||
1042         collector_type_ == kCollectorTypeCCBackground;
1043   }
1044 
1045   // Trim the managed and native spaces by releasing unused memory back to the OS.
1046   void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
1047 
1048   // Trim 0 pages at the end of reference tables.
1049   void TrimIndirectReferenceTables(Thread* self);
1050 
1051   template <typename Visitor>
1052   ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
1053       REQUIRES_SHARED(Locks::mutator_lock_)
1054       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1055   template <typename Visitor>
1056   ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
1057       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1058 
1059   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
1060 
1061   // GC stress mode attempts to do one GC per unique backtrace.
1062   void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
1063       REQUIRES_SHARED(Locks::mutator_lock_)
1064       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
1065 
1066   collector::GcType NonStickyGcType() const {
1067     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
1068   }
1069 
1070   // How large new_native_bytes_allocated_ can grow before we trigger a new
1071   // GC.
1072   ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
1073     // Reuse max_free_ for the native allocation gc watermark, so that the
1074     // native heap is treated in the same way as the Java heap in the case
1075     // where the gc watermark update would exceed max_free_. Using max_free_
1076     // instead of the target utilization means the watermark doesn't depend on
1077     // the current number of registered native allocations.
1078     return max_free_;
1079   }
1080 
1081   // How large new_native_bytes_allocated_ can grow while GC is in progress
1082   // before we block the allocating thread to allow GC to catch up.
1083   ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
1084     // Historically the native allocations were bounded by growth_limit_. This
1085     // uses that same value, dividing growth_limit_ by 2 to account for
1086     // the fact that now the bound is relative to the number of retained
1087     // registered native allocations rather than absolute.
1088     return growth_limit_ / 2;
1089   }
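
  // Putting the two watermarks together (illustrative pseudologic only; the real checks live in
  // RegisterNativeAllocation, and gc_in_progress is a placeholder name):
  //
  //   if (new_native_bytes_allocated_ > NativeAllocationGcWatermark()) {
  //     // Trigger a new GC to account for the native allocations.
  //   }
  //   if (gc_in_progress && new_native_bytes_allocated_ > NativeAllocationBlockingGcWatermark()) {
  //     // Block the allocating thread until the GC catches up.
  //   }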
1090 
1091   void TraceHeapSize(size_t heap_size);
1092 
1093   // All-known continuous spaces, where objects lie within fixed bounds.
1094   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1095 
1096   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
1097   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1098 
1099   // All-known alloc spaces, where objects may be or have been allocated.
1100   std::vector<space::AllocSpace*> alloc_spaces_;
1101 
1102   // A space where non-movable objects are allocated; when compaction is enabled it contains
1103   // Classes, ArtMethods, ArtFields, and other non-moving objects.
1104   space::MallocSpace* non_moving_space_;
1105 
1106   // Space which we use for the kAllocatorTypeROSAlloc.
1107   space::RosAllocSpace* rosalloc_space_;
1108 
1109   // Space which we use for the kAllocatorTypeDlMalloc.
1110   space::DlMallocSpace* dlmalloc_space_;
1111 
1112   // The main space is the space which the GC copies to and from on process state updates. This
1113   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
1114   space::MallocSpace* main_space_;
1115 
1116   // The large object space we are currently allocating into.
1117   space::LargeObjectSpace* large_object_space_;
1118 
1119   // The card table, dirtied by the write barrier.
1120   std::unique_ptr<accounting::CardTable> card_table_;
1121 
1122   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
1123 
1124   // A mod-union table remembers all of the references from its space to other spaces.
1125   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
1126       mod_union_tables_;
1127 
1128   // A remembered set remembers all of the references from its space to the target space.
1129   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
1130       remembered_sets_;
1131 
1132   // The current collector type.
1133   CollectorType collector_type_;
1134   // Which collector we use when the app is in the foreground.
1135   CollectorType foreground_collector_type_;
1136   // Which collector we will use when the app is notified of a transition to background.
1137   CollectorType background_collector_type_;
1138   // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
1139   CollectorType desired_collector_type_;
1140 
1141   // Lock which guards pending tasks.
1142   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1143 
1144   // How many GC threads we may use for paused parts of garbage collection.
1145   const size_t parallel_gc_threads_;
1146 
1147   // How many GC threads we may use for unpaused parts of garbage collection.
1148   const size_t conc_gc_threads_;
1149 
1150   // Boolean for if we are in low memory mode.
1151   const bool low_memory_mode_;
1152 
1153   // If we get a pause longer than long pause log threshold, then we print out the GC after it
1154   // finishes.
1155   const size_t long_pause_log_threshold_;
1156 
1157   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
1158   const size_t long_gc_log_threshold_;
1159 
1160   // If we ignore the max footprint, the heap is allowed to grow until it hits the heap capacity;
1161   // this is useful for benchmarking since it reduces the time spent in GC to a low percentage.
1162   const bool ignore_max_footprint_;
1163 
1164   // Lock which guards zygote space creation.
1165   Mutex zygote_creation_lock_;
1166 
1167   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
1168   // zygote space creation.
1169   space::ZygoteSpace* zygote_space_;
1170 
1171   // Minimum allocation size of large object.
1172   size_t large_object_threshold_;
1173 
1174   // Guards access to the state of GC; the associated condition variable is used to signal when a
1175   // GC completes.
1176   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1177   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
1178 
1179   // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
1180   Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1181   std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
1182   // This counter keeps track of how many threads are currently in a JNI critical section. This is
1183   // incremented once per thread even with nested enters.
1184   size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
1185   bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
1186 
1187   // Reference processor.
1188   std::unique_ptr<ReferenceProcessor> reference_processor_;
1189 
1190   // Task processor, proxies heap trim requests to the daemon threads.
1191   std::unique_ptr<TaskProcessor> task_processor_;
1192 
1193   // Collector type of the running GC.
1194   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
1195 
1196   // Cause of the last running GC.
1197   volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
1198 
1199   // The thread currently running the GC.
1200   volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
1201 
1202   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
1203   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
1204   collector::GcType next_gc_type_;
1205 
1206   // Maximum size that the heap can reach.
1207   size_t capacity_;
1208 
  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared", making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint, TryAllocate returns null, indicating
  // that a GC should be triggered.
  size_t max_allowed_footprint_;

  // When num_bytes_allocated_ exceeds this amount, a concurrent GC should be requested so that it
  // completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;
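  // Illustrative note (not part of the original header): these limits are typically ordered
  // roughly as
  //   concurrent_start_bytes_ <= max_allowed_footprint_ <= growth_limit_ <= capacity_,
  // so a concurrent GC is kicked off somewhat before allocations would hit the footprint limit.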

  // Since the heap was created, how many bytes have been freed.
  uint64_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  uint64_t total_objects_freed_ever_;

  // Number of bytes allocated.  Adjusted after each allocation and free.
  Atomic<size_t> num_bytes_allocated_;

  // Number of registered native bytes allocated since the last time GC was
  // triggered. Adjusted after each RegisterNativeAllocation and
  // RegisterNativeFree. Used to determine when to trigger GC for native
  // allocations.
  // See the REDESIGN section of go/understanding-register-native-allocation.
  Atomic<size_t> new_native_bytes_allocated_;

  // Number of registered native bytes allocated prior to the last time GC was
  // triggered, for debugging purposes. The current number of registered
  // native bytes is determined by taking the sum of
  // old_native_bytes_allocated_ and new_native_bytes_allocated_.
  Atomic<size_t> old_native_bytes_allocated_;

  // Used for synchronization when multiple threads call into
  // RegisterNativeAllocation and require blocking GC.
  // * If a previous blocking GC is in progress, all threads will wait for
  // that GC to complete, then wait for one of the threads to complete another
  // blocking GC.
  // * If a blocking GC is assigned but not in progress, a thread has been
  // assigned to run a blocking GC but has not started yet. Threads will wait
  // for the assigned blocking GC to complete.
  // * If a blocking GC is neither assigned nor in progress, the first thread will
  // run a blocking GC and signal to other threads that blocking GC has been
  // assigned.
  Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
  bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
  bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
  uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
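  // Illustrative sketch (not part of the original header) of how a thread that needs a blocking
  // GC might use the state above; only the members named here are real, the control flow is a
  // simplified approximation of the protocol described in the comment:
  //
  //   MutexLock mu(self, *native_blocking_gc_lock_);
  //   uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
  //   while (native_blocking_gc_in_progress_) {
  //     native_blocking_gc_cond_->Wait(self);     // Wait out the blocking GC already running.
  //   }
  //   if (!native_blocking_gc_is_assigned_) {
  //     native_blocking_gc_is_assigned_ = true;   // This thread will run the next blocking GC.
  //   } else {
  //     while (native_blocking_gcs_finished_ == initial_gcs_finished) {
  //       native_blocking_gc_cond_->Wait(self);   // Wait for the assigned blocking GC to finish.
  //     }
  //   }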

  // Number of bytes freed by thread local buffer revokes. This will
  // cancel out the ahead-of-time bulk counting of bytes allocated in
  // rosalloc thread-local buffers.  It is temporarily accumulated
  // here to be subtracted from num_bytes_allocated_ later at the next
  // GC.
  Atomic<size_t> num_bytes_freed_revoke_;

  // Info related to the current or previous GC iteration.
  collector::Iteration current_gc_iteration_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_pre_sweeping_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;
  bool verify_pre_gc_rosalloc_;
  bool verify_pre_sweeping_rosalloc_;
  bool verify_post_gc_rosalloc_;
  const bool gc_stress_mode_;

  // RAII that temporarily disables the rosalloc verification during
  // the zygote fork.
  class ScopedDisableRosAllocVerification {
   private:
    Heap* const heap_;
    const bool orig_verify_pre_gc_;
    const bool orig_verify_pre_sweeping_;
    const bool orig_verify_post_gc_;

   public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };
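  // Illustrative usage (not part of the original header): code performing the zygote fork would
  // typically write
  //   ScopedDisableRosAllocVerification disable_rosalloc_verification(this);
  // and rely on the destructor to restore the saved verification flags when the scope ends.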

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;
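  // Illustrative note (not part of the original header): roughly,
  //   allocation_rate_ ~= (bytes allocated since the previous GC) / (seconds since the previous GC),
  // which gives an estimate of how quickly allocations will approach concurrent_start_bytes_ again.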

  // For a GC cycle, bitmaps that track which objects are live and which have been marked so far.
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run, in order, when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  space::RegionSpace* region_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing the
  // heap for utilization, regardless of the target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when we are a foreground app instead of background.
  double foreground_heap_growth_multiplier_;
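  // Illustrative sketch (not part of the original header) of how these knobs interact,
  // approximately: after a full GC the new target footprint is chosen near
  //   bytes_allocated / target_utilization_,
  // then clamped so that the free headroom stays within [min_free_, max_free_], with the headroom
  // scaled up by foreground_heap_growth_multiplier_ while the app is in the foreground.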

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // The current state of heap verification; may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;

  // Compacting GC disable count; prevents compacting GC from running iff it is > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool is_running_on_memory_tool_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimum interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // Number of OOMs avoided (delayed) by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count of requested homogeneous space compactions.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count of ignored homogeneous space compactions.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count of performed homogeneous space compactions.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // Whether or not a concurrent GC is pending.
  Atomic<bool> concurrent_gc_pending_;

  // Active tasks which we can modify (change target time, desired collector type, etc.).
  CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
  HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  // True if the currently running collection has made some thread wait.
  bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
  // The number of blocking GC runs.
  uint64_t blocking_gc_count_;
  // The total duration of blocking GC runs.
  uint64_t blocking_gc_time_;
  // The duration of the window for the GC count rate histograms.
  static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
  // The last time when the GC count rate histograms were updated.
  // This is rounded to a multiple of kGcCountRateHistogramWindowDuration (10s).
  uint64_t last_update_time_gc_count_rate_histograms_;
  // The running count of GC runs in the last window.
  uint64_t gc_count_last_window_;
  // The running count of blocking GC runs in the last window.
  uint64_t blocking_gc_count_last_window_;
  // The maximum number of buckets in the GC count rate histograms.
  static constexpr size_t kGcCountRateMaxBucketCount = 200;
  // The histogram of the number of GC invocations per window duration.
  Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
  // The histogram of the number of blocking GC invocations per window duration.
  Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
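  // Illustrative example (not part of the original header): with a 10s window, if three GCs
  // complete between t = 20s and t = 30s and one of them is blocking, then at the next update
  // gc_count_rate_histogram_ receives a sample of 3 and blocking_gc_count_rate_histogram_ a
  // sample of 1 for that window.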

  // Allocation tracking support.
  Atomic<bool> alloc_tracking_enabled_;
  std::unique_ptr<AllocRecordObjectMap> allocation_records_;

  // GC stress related data structures.
  Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Debugging counters: seen backtraces vs. unique backtraces.
  Atomic<uint64_t> seen_backtrace_count_;
  Atomic<uint64_t> unique_backtrace_count_;
  // Stack trace hashes that we have already seen.
  std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);

  // We disable GC when we are shutting down the runtime in case there are daemon threads still
  // allocating.
  bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);

  // Boot image spaces.
  std::vector<space::ImageSpace*> boot_image_spaces_;

  // An installed allocation listener.
  Atomic<AllocationListener*> alloc_listener_;
  // An installed GC Pause listener.
  Atomic<GcPauseListener*> gc_pause_listener_;

  std::unique_ptr<Verification> verification_;

  friend class CollectorTransitionTask;
  friend class collector::GarbageCollector;
  friend class collector::MarkCompact;
  friend class collector::ConcurrentCopying;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class ScopedGCCriticalSection;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_