1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_HEAP_H_
18 #define ART_RUNTIME_GC_HEAP_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <unordered_set>
23 #include <vector>
24 
25 #include <android-base/logging.h>
26 
27 #include "allocator_type.h"
28 #include "base/atomic.h"
29 #include "base/histogram.h"
30 #include "base/macros.h"
31 #include "base/mutex.h"
32 #include "base/runtime_debug.h"
33 #include "base/safe_map.h"
34 #include "base/time_utils.h"
35 #include "gc/collector/gc_type.h"
36 #include "gc/collector/iteration.h"
37 #include "gc/collector/mark_compact.h"
38 #include "gc/collector_type.h"
39 #include "gc/gc_cause.h"
40 #include "gc/space/large_object_space.h"
41 #include "handle.h"
42 #include "obj_ptr.h"
43 #include "offsets.h"
44 #include "process_state.h"
45 #include "read_barrier_config.h"
46 #include "runtime_globals.h"
47 #include "verify_object.h"
48 
49 namespace art {
50 
51 class ConditionVariable;
52 enum class InstructionSet;
53 class IsMarkedVisitor;
54 class Mutex;
55 class ReflectiveValueVisitor;
56 class RootVisitor;
57 class StackVisitor;
58 class Thread;
59 class ThreadPool;
60 class TimingLogger;
61 class VariableSizedHandleScope;
62 
63 namespace mirror {
64 class Class;
65 class Object;
66 }  // namespace mirror
67 
68 namespace gc {
69 
70 class AllocationListener;
71 class AllocRecordObjectMap;
72 class GcPauseListener;
73 class HeapTask;
74 class ReferenceProcessor;
75 class TaskProcessor;
76 class Verification;
77 
78 namespace accounting {
79 template <typename T> class AtomicStack;
80 using ObjectStack = AtomicStack<mirror::Object>;
81 class CardTable;
82 class HeapBitmap;
83 class ModUnionTable;
84 class ReadBarrierTable;
85 class RememberedSet;
86 }  // namespace accounting
87 
88 namespace collector {
89 class ConcurrentCopying;
90 class GarbageCollector;
91 class MarkSweep;
92 class SemiSpace;
93 }  // namespace collector
94 
95 namespace allocator {
96 class RosAlloc;
97 }  // namespace allocator
98 
99 namespace space {
100 class AllocSpace;
101 class BumpPointerSpace;
102 class ContinuousMemMapAllocSpace;
103 class DiscontinuousSpace;
104 class DlMallocSpace;
105 class ImageSpace;
106 class LargeObjectSpace;
107 class MallocSpace;
108 class RegionSpace;
109 class RosAllocSpace;
110 class Space;
111 class ZygoteSpace;
112 }  // namespace space
113 
114 enum HomogeneousSpaceCompactResult {
115   // Success.
116   kSuccess,
117   // Reject due to disabled moving GC.
118   kErrorReject,
119   // Unsupported due to the current configuration.
120   kErrorUnsupported,
121   // System is shutting down.
122   kErrorVMShuttingDown,
123 };
124 
125 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
126 static constexpr bool kUseRosAlloc = true;
127 
128 // If true, use thread-local allocation stack.
129 static constexpr bool kUseThreadLocalAllocationStack = true;
130 
131 class Heap {
132  public:
133   // How much we grow the TLAB if we can do it.
134   static constexpr size_t kPartialTlabSize = 16 * KB;
135   static constexpr bool kUsePartialTlabs = true;
136 
137   static constexpr size_t kDefaultStartingSize = kPageSize;
138   static constexpr size_t kDefaultInitialSize = 2 * MB;
139   static constexpr size_t kDefaultMaximumSize = 256 * MB;
140   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
141   static constexpr size_t kDefaultMaxFree = 2 * MB;
142   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
143   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
144   static constexpr size_t kDefaultLongPauseLogThresholdGcStress = MsToNs(50);
145   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
146   static constexpr size_t kDefaultLongGCLogThresholdGcStress = MsToNs(1000);
147   static constexpr size_t kDefaultTLABSize = 32 * KB;
148   static constexpr double kDefaultTargetUtilization = 0.75;
149   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
150   // Primitive arrays larger than this size are put in the large object space.
151   static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
152   static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
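  // (A sketch of the arithmetic, assuming the usual 4 KiB kPageSize: kMinLargeObjectThreshold is
  //  3 * 4 KiB = 12 KiB, so primitive arrays larger than 12 KiB go to the large object space.)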
153   // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
154   static constexpr bool kDefaultEnableParallelGC = true;
155   static uint8_t* const kPreferredAllocSpaceBegin;
156 
157   // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
158   // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
159   static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
160       USE_ART_LOW_4G_ALLOCATOR ?
161           space::LargeObjectSpaceType::kFreeList
162         : space::LargeObjectSpaceType::kMap;
163 
164   // Used so that we don't overflow the allocation time atomic integer.
165   static constexpr size_t kTimeAdjust = 1024;
166 
167   // Client should call NotifyNativeAllocations every kNotifyNativeInterval allocations.
168   // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
169   // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec
170   // on Android.
171 #ifdef __ANDROID__
172   static constexpr uint32_t kNotifyNativeInterval = 64;
173 #else
174   // Some host mallinfo() implementations are slow. And memory is less scarce.
175   static constexpr uint32_t kNotifyNativeInterval = 384;
176 #endif
177 
178   // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
179   // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
180   // make it safe to allocate that many bytes between checks.
181   static constexpr size_t kCheckImmediatelyThreshold = 300000;
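  // For scale: with the Android value of kNotifyNativeInterval (64) above, that product is
  // 300000 * 64 ≈ 19.2 MB of native allocation between forced checks.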
182 
183   // How often we allow heap trimming to happen (nanoseconds).
184   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
185   // Whether the transition-GC heap threshold condition applies or not for non-low memory devices.
186   // Stressing GC will bypass the heap threshold condition.
187   DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);
188 
189   // Create a heap with the requested sizes. The possibly empty
190   // image_file_names specify Spaces to load based on
191   // ImageWriter output.
192   Heap(size_t initial_size,
193        size_t growth_limit,
194        size_t min_free,
195        size_t max_free,
196        double target_utilization,
197        double foreground_heap_growth_multiplier,
198        size_t stop_for_native_allocs,
199        size_t capacity,
200        size_t non_moving_space_capacity,
201        const std::vector<std::string>& boot_class_path,
202        const std::vector<std::string>& boot_class_path_locations,
203        const std::vector<int>& boot_class_path_fds,
204        const std::vector<int>& boot_class_path_image_fds,
205        const std::vector<int>& boot_class_path_vdex_fds,
206        const std::vector<int>& boot_class_path_oat_fds,
207        const std::vector<std::string>& image_file_names,
208        InstructionSet image_instruction_set,
209        CollectorType foreground_collector_type,
210        CollectorType background_collector_type,
211        space::LargeObjectSpaceType large_object_space_type,
212        size_t large_object_threshold,
213        size_t parallel_gc_threads,
214        size_t conc_gc_threads,
215        bool low_memory_mode,
216        size_t long_pause_threshold,
217        size_t long_gc_threshold,
218        bool ignore_target_footprint,
219        bool always_log_explicit_gcs,
220        bool use_tlab,
221        bool verify_pre_gc_heap,
222        bool verify_pre_sweeping_heap,
223        bool verify_post_gc_heap,
224        bool verify_pre_gc_rosalloc,
225        bool verify_pre_sweeping_rosalloc,
226        bool verify_post_gc_rosalloc,
227        bool gc_stress_mode,
228        bool measure_gc_performance,
229        bool use_homogeneous_space_compaction,
230        bool use_generational_cc,
231        uint64_t min_interval_homogeneous_space_compaction_by_oom,
232        bool dump_region_info_before_gc,
233        bool dump_region_info_after_gc);
234 
235   ~Heap();
236 
237   // Allocates and initializes storage for an object instance.
238   template <bool kInstrumented = true, typename PreFenceVisitor>
239   mirror::Object* AllocObject(Thread* self,
240                               ObjPtr<mirror::Class> klass,
241                               size_t num_bytes,
242                               const PreFenceVisitor& pre_fence_visitor)
243       REQUIRES_SHARED(Locks::mutator_lock_)
244       REQUIRES(!*gc_complete_lock_,
245                !*pending_task_lock_,
246                !*backtrace_lock_,
247                !process_state_update_lock_,
248                !Roles::uninterruptible_) {
249     return AllocObjectWithAllocator<kInstrumented>(self,
250                                                    klass,
251                                                    num_bytes,
252                                                    GetCurrentAllocator(),
253                                                    pre_fence_visitor);
254   }
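  // A minimal usage sketch (hypothetical caller, not part of this header): the pre-fence visitor
  // runs on the freshly allocated storage before the allocation is published, e.g.
  //   mirror::Object* o = heap->AllocObject(self, klass, byte_count,
  //       [](ObjPtr<mirror::Object> obj, size_t usable_size) { /* initialize obj's fields */ });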
255 
256   template <bool kInstrumented = true, typename PreFenceVisitor>
257   mirror::Object* AllocNonMovableObject(Thread* self,
258                                         ObjPtr<mirror::Class> klass,
259                                         size_t num_bytes,
260                                         const PreFenceVisitor& pre_fence_visitor)
261       REQUIRES_SHARED(Locks::mutator_lock_)
262       REQUIRES(!*gc_complete_lock_,
263                !*pending_task_lock_,
264                !*backtrace_lock_,
265                !process_state_update_lock_,
266                !Roles::uninterruptible_) {
267     mirror::Object* obj = AllocObjectWithAllocator<kInstrumented>(self,
268                                                                   klass,
269                                                                   num_bytes,
270                                                                   GetCurrentNonMovingAllocator(),
271                                                                   pre_fence_visitor);
272     // Java Heap Profiler check and sample allocation.
273     JHPCheckNonTlabSampleAllocation(self, obj, num_bytes);
274     return obj;
275   }
276 
277   template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
278   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
279                                                          ObjPtr<mirror::Class> klass,
280                                                          size_t byte_count,
281                                                          AllocatorType allocator,
282                                                          const PreFenceVisitor& pre_fence_visitor)
283       REQUIRES_SHARED(Locks::mutator_lock_)
284       REQUIRES(!*gc_complete_lock_,
285                !*pending_task_lock_,
286                !*backtrace_lock_,
287                !process_state_update_lock_,
288                !Roles::uninterruptible_);
289 
290   AllocatorType GetCurrentAllocator() const {
291     return current_allocator_;
292   }
293 
294   AllocatorType GetCurrentNonMovingAllocator() const {
295     return current_non_moving_allocator_;
296   }
297 
298   AllocatorType GetUpdatedAllocator(AllocatorType old_allocator) {
299     return (old_allocator == kAllocatorTypeNonMoving) ?
300         GetCurrentNonMovingAllocator() : GetCurrentAllocator();
301   }
302 
303   // Visit all of the live objects in the heap.
304   template <typename Visitor>
305   ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
306       REQUIRES_SHARED(Locks::mutator_lock_)
307       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
308   template <typename Visitor>
309   ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
310       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
311 
312   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
313       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
314 
315   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
316       REQUIRES_SHARED(Locks::mutator_lock_);
317 
318   // Inform the garbage collector of a non-malloc allocated native memory that might become
319   // reclaimable in the future as a result of Java garbage collection.
320   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
321       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
322   void RegisterNativeFree(JNIEnv* env, size_t bytes);
323 
324   // Notify the garbage collector of malloc allocations that might be reclaimable
325   // as a result of Java garbage collection. Each such call represents approximately
326   // kNotifyNativeInterval such allocations.
327   void NotifyNativeAllocations(JNIEnv* env)
328       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
329 
330   uint32_t GetNotifyNativeInterval() {
331     return kNotifyNativeInterval;
332   }
333 
334   // Change the allocator, updates entrypoints.
335   void ChangeAllocator(AllocatorType allocator)
336       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
337 
338   // Change the collector to be one of the possible options (MS, CMS, SS). Only safe when no
339   // concurrent accesses to the heap are possible.
340   void ChangeCollector(CollectorType collector_type)
341       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
342 
343   // The given reference is believed to point to an object in the Java heap; check its soundness.
344   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
345   // proper lock ordering for it.
346   void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
347 
348   // Consistency check of all live references.
349   void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
350   // Returns how many failures occurred.
351   size_t VerifyHeapReferences(bool verify_referents = true)
352       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
353   bool VerifyMissingCardMarks()
354       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
355 
356   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
357   // and doesn't abort on error, allowing the caller to report more
358   // meaningful diagnostics.
359   bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
360 
361   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
362   // very slow.
363   bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
364       REQUIRES_SHARED(Locks::mutator_lock_);
365 
366   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
367   // Requires the heap lock to be held.
368   bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
369                           bool search_allocation_stack = true,
370                           bool search_live_stack = true,
371                           bool sorted = false)
372       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
373 
374   // Returns true if there is any chance that the object (obj) will move.
375   bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
376 
377   // Enables us to prevent compacting GC until objects are released.
378   void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
379   void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
380 
381   // Temporarily disable thread flip for JNI critical calls.
382   void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
383   void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
384   void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
385   void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
386 
387   // Ensures that the obj doesn't cause userfaultfd in JNI critical calls.
388   void EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
389 
390   // Clear all of the mark bits; doesn't clear bitmaps which have the same live bits as mark bits.
391   // Mutator lock is required for GetContinuousSpaces.
392   void ClearMarkedObjects()
393       REQUIRES(Locks::heap_bitmap_lock_)
394       REQUIRES_SHARED(Locks::mutator_lock_);
395 
396   // Initiates an explicit garbage collection. Guarantees that a GC started after this call has
397   // completed.
398   void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
399       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
400 
401   // Does a concurrent GC, provided the GC numbered requested_gc_num has not already been
402   // completed. Should only be called by the GC daemon thread through runtime.
403   void ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num)
404       REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_,
405                !*pending_task_lock_, !process_state_update_lock_);
406 
407   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
408   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
409   void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
410                       bool use_is_assignable_from,
411                       uint64_t* counts)
412       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
413       REQUIRES_SHARED(Locks::mutator_lock_);
414 
415   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
416   // implement dalvik.system.VMRuntime.clearGrowthLimit.
417   void ClearGrowthLimit() REQUIRES(!*gc_complete_lock_);
418 
419   // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
420   // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
421   void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);
422 
423   // Target ideal heap utilization ratio, implements
424   // dalvik.system.VMRuntime.getTargetHeapUtilization.
425   double GetTargetHeapUtilization() const {
426     return target_utilization_;
427   }
428 
429   // Data structure memory usage tracking.
430   void RegisterGCAllocation(size_t bytes);
431   void RegisterGCDeAllocation(size_t bytes);
432 
433   // Set the heap's private space pointers to be the same as the space based on its type. Public
434   // due to usage by tests.
435   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
436       REQUIRES(!Locks::heap_bitmap_lock_);
437   void AddSpace(space::Space* space)
438       REQUIRES(!Locks::heap_bitmap_lock_)
439       REQUIRES(Locks::mutator_lock_);
440   void RemoveSpace(space::Space* space)
441     REQUIRES(!Locks::heap_bitmap_lock_)
442     REQUIRES(Locks::mutator_lock_);
443 
444   double GetPreGcWeightedAllocatedBytes() const {
445     return pre_gc_weighted_allocated_bytes_;
446   }
447 
448   double GetPostGcWeightedAllocatedBytes() const {
449     return post_gc_weighted_allocated_bytes_;
450   }
451 
452   void CalculatePreGcWeightedAllocatedBytes();
453   void CalculatePostGcWeightedAllocatedBytes();
454   uint64_t GetTotalGcCpuTime();
455 
456   uint64_t GetProcessCpuStartTime() const {
457     return process_cpu_start_time_ns_;
458   }
459 
460   uint64_t GetPostGCLastProcessCpuTime() const {
461     return post_gc_last_process_cpu_time_ns_;
462   }
463 
464   // Set target ideal heap utilization ratio, implements
465   // dalvik.system.VMRuntime.setTargetHeapUtilization.
466   void SetTargetHeapUtilization(float target);
467 
468   // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
469   // from the system. Doesn't allow the space to exceed its growth limit.
470   // Set while we hold gc_complete_lock or collector_type_running_ != kCollectorTypeNone.
471   void SetIdealFootprint(size_t max_allowed_footprint);
472 
473   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
474   // waited for. Only waits for running collections, ignoring a requested but unstarted GC. Only
475   // heuristic, since a new GC may have started by the time we return.
476   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
477 
478   // Update the heap's process state to a new value, may cause compaction to occur.
479   void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
480       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
481 
482   bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
483     // No lock since vector empty is thread safe.
484     return !continuous_spaces_.empty();
485   }
486 
487   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
488       REQUIRES_SHARED(Locks::mutator_lock_) {
489     return continuous_spaces_;
490   }
491 
492   const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
493     return discontinuous_spaces_;
494   }
495 
496   const collector::Iteration* GetCurrentGcIteration() const {
497     return &current_gc_iteration_;
498   }
499   collector::Iteration* GetCurrentGcIteration() {
500     return &current_gc_iteration_;
501   }
502 
503   // Enable verification of object references when the runtime is sufficiently initialized.
504   void EnableObjectValidation() {
505     verify_object_mode_ = kVerifyObjectSupport;
506     if (verify_object_mode_ > kVerifyObjectModeDisabled) {
507       VerifyHeap();
508     }
509   }
510 
511   // Disable object reference verification for image writing.
512   void DisableObjectValidation() {
513     verify_object_mode_ = kVerifyObjectModeDisabled;
514   }
515 
516   // Other checks may be performed if we know the heap should be in a healthy state.
517   bool IsObjectValidationEnabled() const {
518     return verify_object_mode_ > kVerifyObjectModeDisabled;
519   }
520 
521   // Returns true if low memory mode is enabled.
522   bool IsLowMemoryMode() const {
523     return low_memory_mode_;
524   }
525 
526   // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
527   // Scales heap growth, min free, and max free.
528   double HeapGrowthMultiplier() const;
529 
530   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
531   // free-list backed space.
532   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
533 
534   // Record the bytes freed by thread-local buffer revoke.
535   void RecordFreeRevoke();
536 
537   accounting::CardTable* GetCardTable() const {
538     return card_table_.get();
539   }
540 
541   accounting::ReadBarrierTable* GetReadBarrierTable() const {
542     return rb_table_.get();
543   }
544 
545   void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
546 
547   // Returns the number of bytes currently allocated.
548   // The result should be treated as an approximation, if it is being concurrently updated.
549   size_t GetBytesAllocated() const {
550     return num_bytes_allocated_.load(std::memory_order_relaxed);
551   }
552 
553   bool GetUseGenerationalCC() const {
554     return use_generational_cc_;
555   }
556 
557   // Returns the number of objects currently allocated.
558   size_t GetObjectsAllocated() const
559       REQUIRES(!Locks::heap_bitmap_lock_);
560 
561   // Returns the total number of objects allocated since the heap was created.
562   uint64_t GetObjectsAllocatedEver() const;
563 
564   // Returns the total number of bytes allocated since the heap was created.
565   uint64_t GetBytesAllocatedEver() const;
566 
567   // Returns the total number of objects freed since the heap was created.
568   // With default memory order, this should be viewed only as a hint.
569   uint64_t GetObjectsFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
570     return total_objects_freed_ever_.load(mo);
571   }
572 
573   // Returns the total number of bytes freed since the heap was created.
574   // With default memory order, this should be viewed only as a hint.
575   uint64_t GetBytesFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
576     return total_bytes_freed_ever_.load(mo);
577   }
578 
579   space::RegionSpace* GetRegionSpace() const {
580     return region_space_;
581   }
582 
583   space::BumpPointerSpace* GetBumpPointerSpace() const {
584     return bump_pointer_space_;
585   }
586   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
587   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
588   // were specified. Android apps start with a growth limit (small heap size) which is
589   // cleared/extended for large apps.
590   size_t GetMaxMemory() const {
591     // There are some race conditions in the allocation code that can cause bytes allocated to
592     // become larger than growth_limit_ in rare cases.
593     return std::max(GetBytesAllocated(), growth_limit_);
594   }
595 
596   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
597   // consumed by an application.
598   size_t GetTotalMemory() const;
599 
600   // Returns approximately how much free memory we have until the next GC happens.
601   size_t GetFreeMemoryUntilGC() const {
602     return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
603                               GetBytesAllocated());
604   }
605 
606   // Returns approximately how much free memory we have until the next OOME happens.
607   size_t GetFreeMemoryUntilOOME() const {
608     return UnsignedDifference(growth_limit_, GetBytesAllocated());
609   }
610 
611   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
612   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
613   size_t GetFreeMemory() const {
614     return UnsignedDifference(GetTotalMemory(),
615                               num_bytes_allocated_.load(std::memory_order_relaxed));
616   }
617 
618   // Get the space that corresponds to an object's address. Current implementation searches all
619   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
620   // TODO: consider using faster data structure like binary tree.
621   space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
622       REQUIRES_SHARED(Locks::mutator_lock_);
623 
624   space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
625       REQUIRES_SHARED(Locks::mutator_lock_);
626 
627   space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
628                                                               bool fail_ok) const
629       REQUIRES_SHARED(Locks::mutator_lock_);
630 
631   space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
632       REQUIRES_SHARED(Locks::mutator_lock_);
633 
634   space::Space* FindSpaceFromAddress(const void* ptr) const
635       REQUIRES_SHARED(Locks::mutator_lock_);
636 
637   std::string DumpSpaceNameFromAddress(const void* addr) const
638       REQUIRES_SHARED(Locks::mutator_lock_);
639 
640   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
641 
642   // Do a pending collector transition.
643   void DoPendingCollectorTransition()
644       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
645 
646   // Deflate monitors, ... and trim the spaces.
647   void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
648 
649   void RevokeThreadLocalBuffers(Thread* thread);
650   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
651   void RevokeAllThreadLocalBuffers();
652   void AssertThreadLocalBuffersAreRevoked(Thread* thread);
653   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
654   void RosAllocVerification(TimingLogger* timings, const char* name)
655       REQUIRES(Locks::mutator_lock_);
656 
657   accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
658     return live_bitmap_.get();
659   }
660 
661   accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
662     return mark_bitmap_.get();
663   }
664 
665   accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
666     return live_stack_.get();
667   }
668 
669   accounting::ObjectStack* GetAllocationStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
670     return allocation_stack_.get();
671   }
672 
673   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
674 
675   // Mark and empty stack.
676   void FlushAllocStack()
677       REQUIRES_SHARED(Locks::mutator_lock_)
678       REQUIRES(Locks::heap_bitmap_lock_);
679 
680   // Revoke all the thread-local allocation stacks.
681   void RevokeAllThreadLocalAllocationStacks(Thread* self)
682       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
683 
684   // Mark all the objects in the allocation stack in the specified bitmap.
685   // TODO: Refactor?
686   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
687                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
688                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
689                       accounting::ObjectStack* stack)
690       REQUIRES_SHARED(Locks::mutator_lock_)
691       REQUIRES(Locks::heap_bitmap_lock_);
692 
693   // Mark the specified allocation stack as live.
694   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
695       REQUIRES_SHARED(Locks::mutator_lock_)
696       REQUIRES(Locks::heap_bitmap_lock_);
697 
698   // Unbind any bound bitmaps.
699   void UnBindBitmaps()
700       REQUIRES(Locks::heap_bitmap_lock_)
701       REQUIRES_SHARED(Locks::mutator_lock_);
702 
703   // Returns the boot image spaces. There may be multiple boot image spaces.
704   const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
705     return boot_image_spaces_;
706   }
707 
708   bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
709       REQUIRES_SHARED(Locks::mutator_lock_);
710 
711   bool IsInBootImageOatFile(const void* p) const
712       REQUIRES_SHARED(Locks::mutator_lock_);
713 
714   // Get the start address of the boot images if any; otherwise returns 0.
715   uint32_t GetBootImagesStartAddress() const {
716     return boot_images_start_address_;
717   }
718 
719   // Get the size of all boot images, including the heap and oat areas.
720   uint32_t GetBootImagesSize() const {
721     return boot_images_size_;
722   }
723 
724   // Check if a pointer points to a boot image.
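  // (The single unsigned comparison below covers both bounds: for pointers below the boot image
  //  start, the subtraction wraps to a huge value and fails the < boot_images_size_ test.)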
725   bool IsBootImageAddress(const void* p) const {
726     return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
727   }
728 
729   space::DlMallocSpace* GetDlMallocSpace() const {
730     return dlmalloc_space_;
731   }
732 
733   space::RosAllocSpace* GetRosAllocSpace() const {
734     return rosalloc_space_;
735   }
736 
737   // Return the corresponding rosalloc space.
738   space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
739       REQUIRES_SHARED(Locks::mutator_lock_);
740 
741   space::MallocSpace* GetNonMovingSpace() const {
742     return non_moving_space_;
743   }
744 
745   space::LargeObjectSpace* GetLargeObjectsSpace() const {
746     return large_object_space_;
747   }
748 
749   // Returns the free list space that may contain movable objects (the
750   // one that's not the non-moving space), either rosalloc_space_ or
751   // dlmalloc_space_.
752   space::MallocSpace* GetPrimaryFreeListSpace() {
753     if (kUseRosAlloc) {
754       DCHECK(rosalloc_space_ != nullptr);
755       // reinterpret_cast is necessary as the space class hierarchy
756       // isn't known (#included) yet here.
757       return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
758     } else {
759       DCHECK(dlmalloc_space_ != nullptr);
760       return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
761     }
762   }
763 
764   void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
765   std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
766 
767   // GC performance measuring
768   void DumpGcPerformanceInfo(std::ostream& os)
769       REQUIRES(!*gc_complete_lock_);
770   void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
771 
772   // Thread pool. Create either the given number of threads, or as per the
773   // values of conc_gc_threads_ and parallel_gc_threads_.
774   void CreateThreadPool(size_t num_threads = 0);
775   void WaitForWorkersToBeCreated();
776   void DeleteThreadPool();
777   ThreadPool* GetThreadPool() {
778     return thread_pool_.get();
779   }
780   size_t GetParallelGCThreadCount() const {
781     return parallel_gc_threads_;
782   }
783   size_t GetConcGCThreadCount() const {
784     return conc_gc_threads_;
785   }
786   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
787   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
788 
789   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
790   void AddRememberedSet(accounting::RememberedSet* remembered_set);
791   // Also deletes the remembered set.
792   void RemoveRememberedSet(space::Space* space);
793 
794   bool IsCompilingBoot() const;
795   bool HasBootImageSpace() const {
796     return !boot_image_spaces_.empty();
797   }
798 
799   ReferenceProcessor* GetReferenceProcessor() {
800     return reference_processor_.get();
801   }
802   TaskProcessor* GetTaskProcessor() {
803     return task_processor_.get();
804   }
805 
806   bool HasZygoteSpace() const {
807     return zygote_space_ != nullptr;
808   }
809 
810   // Returns the active concurrent copying collector.
811   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
812     collector::ConcurrentCopying* active_collector =
813             active_concurrent_copying_collector_.load(std::memory_order_relaxed);
814     if (use_generational_cc_) {
815       DCHECK((active_collector == concurrent_copying_collector_) ||
816              (active_collector == young_concurrent_copying_collector_))
817               << "active_concurrent_copying_collector: " << active_collector
818               << " young_concurrent_copying_collector: " << young_concurrent_copying_collector_
819               << " concurrent_copying_collector: " << concurrent_copying_collector_;
820     } else {
821       DCHECK_EQ(active_collector, concurrent_copying_collector_);
822     }
823     return active_collector;
824   }
825 
826   collector::MarkCompact* MarkCompactCollector() {
827     DCHECK(!gUseUserfaultfd || mark_compact_ != nullptr);
828     return mark_compact_;
829   }
830 
831   bool IsPerformingUffdCompaction() { return gUseUserfaultfd && mark_compact_->IsCompacting(); }
832 
833   CollectorType CurrentCollectorType() const {
834     DCHECK(!gUseUserfaultfd || collector_type_ == kCollectorTypeCMC);
835     return collector_type_;
836   }
837 
838   bool IsMovingGc() const { return IsMovingGc(CurrentCollectorType()); }
839 
840   CollectorType GetForegroundCollectorType() const { return foreground_collector_type_; }
841 
842   bool IsGcConcurrentAndMoving() const {
843     if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
844       // Assume no transition when a concurrent moving collector is used.
845       DCHECK_EQ(collector_type_, foreground_collector_type_);
846       return true;
847     }
848     return false;
849   }
850 
851   bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
852     MutexLock mu(self, *gc_complete_lock_);
853     return disable_moving_gc_count_ > 0;
854   }
855 
856   // Request an asynchronous trim.
857   void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
858 
859   // Retrieve the current GC number, i.e. the number n such that we completed n GCs so far.
860   // Provides acquire ordering, so that if we read this first, and then check whether a GC is
861   // required, we know that the GC number read actually preceded the test.
862   uint32_t GetCurrentGcNum() {
863     return gcs_completed_.load(std::memory_order_acquire);
864   }
865 
866   // Request asynchronous GC. Observed_gc_num is the value of GetCurrentGcNum() when we started to
867   // evaluate the GC triggering condition. If a GC has been completed since then, we consider our
868   // job done. If we return true, then we ensured that gcs_completed_ will eventually be
869   // incremented beyond observed_gc_num. We return false only in corner cases in which we cannot
870   // ensure that.
871   bool RequestConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t observed_gc_num)
872       REQUIRES(!*pending_task_lock_);
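  // A minimal sketch of the intended request pattern (hypothetical caller), per the comments on
  // GetCurrentGcNum() and RequestConcurrentGC():
  //   uint32_t observed_gc_num = heap->GetCurrentGcNum();  // Acquire-ordered read.
  //   if (/* GC triggering condition, evaluated after the read */) {
  //     heap->RequestConcurrentGC(self, kGcCauseBackground, /*force_full=*/ false, observed_gc_num);
  //   }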
873 
874   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
875   bool MayUseCollector(CollectorType type) const;
876 
877   // Used by tests to reduce timing-dependent flakiness in OOME behavior.
878   void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
879     min_interval_homogeneous_space_compaction_by_oom_ = interval;
880   }
881 
882   // Helpers for android.os.Debug.getRuntimeStat().
883   uint64_t GetGcCount() const;
884   uint64_t GetGcTime() const;
885   uint64_t GetBlockingGcCount() const;
886   uint64_t GetBlockingGcTime() const;
887   void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
888   void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
889   uint64_t GetTotalTimeWaitingForGC() const {
890     return total_wait_time_;
891   }
892   uint64_t GetPreOomeGcCount() const;
893 
894   // Perfetto Art Heap Profiler Support.
895   HeapSampler& GetHeapSampler() {
896     return heap_sampler_;
897   }
898 
899   void InitPerfettoJavaHeapProf();
900   int CheckPerfettoJHPEnabled();
901   // In NonTlab case: Check whether we should report a sample allocation and if so report it.
902   // Also update state (bytes_until_sample).
903   // By calling JHPCheckNonTlabSampleAllocation from different functions for Large allocations and
904   // non-moving allocations we are able to use the stack to identify these allocations separately.
905   void JHPCheckNonTlabSampleAllocation(Thread* self,
906                                        mirror::Object* ret,
907                                        size_t alloc_size);
908   // In Tlab case: Calculate the next tlab size (location of next sample point) and whether
909   // a sample should be taken.
910   size_t JHPCalculateNextTlabSize(Thread* self,
911                                   size_t jhp_def_tlab_size,
912                                   size_t alloc_size,
913                                   bool* take_sample,
914                                   size_t* bytes_until_sample);
915   // Reduce the number of bytes to the next sample position by this adjustment.
916   void AdjustSampleOffset(size_t adjustment);
917 
918   // Allocation tracking support
919   // Callers to this function use double-checked locking to ensure safety on allocation_records_
920   bool IsAllocTrackingEnabled() const {
921     return alloc_tracking_enabled_.load(std::memory_order_relaxed);
922   }
923 
924   void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
925     alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
926   }
927 
928   // Return the current stack depth of allocation records.
929   size_t GetAllocTrackerStackDepth() const {
930     return alloc_record_depth_;
931   }
932 
933   // Set the stack depth of allocation records.
934   void SetAllocTrackerStackDepth(size_t alloc_record_depth) {
935     alloc_record_depth_ = alloc_record_depth;
936   }
937 
938   AllocRecordObjectMap* GetAllocationRecords() const REQUIRES(Locks::alloc_tracker_lock_) {
939     return allocation_records_.get();
940   }
941 
942   void SetAllocationRecords(AllocRecordObjectMap* records)
943       REQUIRES(Locks::alloc_tracker_lock_);
944 
945   void VisitAllocationRecords(RootVisitor* visitor) const
946       REQUIRES_SHARED(Locks::mutator_lock_)
947       REQUIRES(!Locks::alloc_tracker_lock_);
948 
949   void SweepAllocationRecords(IsMarkedVisitor* visitor) const
950       REQUIRES_SHARED(Locks::mutator_lock_)
951       REQUIRES(!Locks::alloc_tracker_lock_);
952 
953   void DisallowNewAllocationRecords() const
954       REQUIRES_SHARED(Locks::mutator_lock_)
955       REQUIRES(!Locks::alloc_tracker_lock_);
956 
957   void AllowNewAllocationRecords() const
958       REQUIRES_SHARED(Locks::mutator_lock_)
959       REQUIRES(!Locks::alloc_tracker_lock_);
960 
961   void BroadcastForNewAllocationRecords() const
962       REQUIRES(!Locks::alloc_tracker_lock_);
963 
964   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
965   bool IsGCDisabledForShutdown() const REQUIRES(!*gc_complete_lock_);
966 
967   // Create a new alloc space and compact default alloc space to it.
968   HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
969       REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
970   bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
971 
972   // Install an allocation listener.
973   void SetAllocationListener(AllocationListener* l);
974   // Remove an allocation listener. Note: the listener must not be deleted, as for performance
975   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
976   void RemoveAllocationListener();
977 
978   // Install a gc pause listener.
979   void SetGcPauseListener(GcPauseListener* l);
980   // Get the currently installed gc pause listener, or null.
981   GcPauseListener* GetGcPauseListener() {
982     return gc_pause_listener_.load(std::memory_order_acquire);
983   }
984   // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
985   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
986   void RemoveGcPauseListener();
987 
988   const Verification* GetVerification() const;
989 
990   void PostForkChildAction(Thread* self) REQUIRES(!*gc_complete_lock_);
991 
992   void TraceHeapSize(size_t heap_size);
993 
994   bool AddHeapTask(gc::HeapTask* task);
995 
996  private:
997   class ConcurrentGCTask;
998   class CollectorTransitionTask;
999   class HeapTrimTask;
1000   class TriggerPostForkCCGcTask;
1001   class ReduceTargetFootprintTask;
1002 
1003   // Compact source space to target space. Returns the collector used.
1004   collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
1005                                        space::ContinuousMemMapAllocSpace* source_space,
1006                                        GcCause gc_cause)
1007       REQUIRES(Locks::mutator_lock_);
1008 
1009   void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
1010   void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
1011       REQUIRES(!*gc_complete_lock_);
1012   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
1013 
1014   double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
1015                                            uint64_t current_process_cpu_time) const;
1016 
1017   // Create a mem map with a preferred base address.
1018   static MemMap MapAnonymousPreferredAddress(const char* name,
1019                                              uint8_t* request_begin,
1020                                              size_t capacity,
1021                                              std::string* out_error_str);
1022 
1023   bool SupportHSpaceCompaction() const {
1024     // Returns true if we can do hspace compaction
1025     return main_space_backup_ != nullptr;
1026   }
1027 
1028   // Size_t saturating arithmetic
1029   static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
1030     return x > y ? x - y : 0;
1031   }
1032   static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
1033     return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
1034   }
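  // For example, UnsignedDifference(3, 5) == 0 and UnsignedSum(SIZE_MAX, 1) == SIZE_MAX;
  // results saturate rather than wrapping around.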
1035 
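  // True for allocators that push new objects onto the allocation stack, i.e. everything except
  // the TLAB, bump-pointer, and region-based allocators.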
1036   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
1037     return
1038         allocator_type != kAllocatorTypeRegionTLAB &&
1039         allocator_type != kAllocatorTypeBumpPointer &&
1040         allocator_type != kAllocatorTypeTLAB &&
1041         allocator_type != kAllocatorTypeRegion;
1042   }
1043   static bool IsMovingGc(CollectorType collector_type) {
1044     return
1045         collector_type == kCollectorTypeCC ||
1046         collector_type == kCollectorTypeSS ||
1047         collector_type == kCollectorTypeCMC ||
1048         collector_type == kCollectorTypeCCBackground ||
1049         collector_type == kCollectorTypeHomogeneousSpaceCompact;
1050   }
1051   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
1052       REQUIRES_SHARED(Locks::mutator_lock_);
1053 
1054   // Checks whether we should garbage collect:
1055   ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
1056   float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
1057   void CheckGCForNative(Thread* self)
1058       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
1059 
1060   accounting::ObjectStack* GetMarkStack() {
1061     return mark_stack_.get();
1062   }
1063 
1064   // We don't force this to be inlined since it is a slow path.
1065   template <bool kInstrumented, typename PreFenceVisitor>
1066   mirror::Object* AllocLargeObject(Thread* self,
1067                                    ObjPtr<mirror::Class>* klass,
1068                                    size_t byte_count,
1069                                    const PreFenceVisitor& pre_fence_visitor)
1070       REQUIRES_SHARED(Locks::mutator_lock_)
1071       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
1072                !*backtrace_lock_, !process_state_update_lock_);
1073 
1074   // Handles Allocate()'s slow allocation path with GC involved after an initial allocation
1075   // attempt failed.
1076   // Called with thread suspension disallowed, but re-enables it, and may suspend, internally.
1077   // Returns null if instrumentation or the allocator changed.
1078   mirror::Object* AllocateInternalWithGc(Thread* self,
1079                                          AllocatorType allocator,
1080                                          bool instrumented,
1081                                          size_t num_bytes,
1082                                          size_t* bytes_allocated,
1083                                          size_t* usable_size,
1084                                          size_t* bytes_tl_bulk_allocated,
1085                                          ObjPtr<mirror::Class>* klass)
1086       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
1087       REQUIRES(Roles::uninterruptible_)
1088       REQUIRES_SHARED(Locks::mutator_lock_);
1089 
1090   // Allocate into a specific space.
1091   mirror::Object* AllocateInto(Thread* self,
1092                                space::AllocSpace* space,
1093                                ObjPtr<mirror::Class> c,
1094                                size_t bytes)
1095       REQUIRES_SHARED(Locks::mutator_lock_);
1096 
1097   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
1098   // wrong space.
1099   void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
1100 
1101   // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
1102   // that the switch statement is constant optimized in the entrypoints.
1103   template <const bool kInstrumented, const bool kGrow>
1104   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
1105                                               AllocatorType allocator_type,
1106                                               size_t alloc_size,
1107                                               size_t* bytes_allocated,
1108                                               size_t* usable_size,
1109                                               size_t* bytes_tl_bulk_allocated)
1110       REQUIRES_SHARED(Locks::mutator_lock_);
1111 
1112   mirror::Object* AllocWithNewTLAB(Thread* self,
1113                                    AllocatorType allocator_type,
1114                                    size_t alloc_size,
1115                                    bool grow,
1116                                    size_t* bytes_allocated,
1117                                    size_t* usable_size,
1118                                    size_t* bytes_tl_bulk_allocated)
1119       REQUIRES_SHARED(Locks::mutator_lock_);
1120 
1121   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
1122       REQUIRES_SHARED(Locks::mutator_lock_);
1123 
1124   // Are we out of memory, and thus should force a GC or fail?
1125   // For concurrent collectors, out of memory is defined by growth_limit_.
1126   // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
1127   // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
1128   // to accommodate the allocation.
1129   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
1130                                                size_t alloc_size,
1131                                                bool grow);
1132 
1133   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
1134   // waited for.
1135   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
1136       REQUIRES(gc_complete_lock_);
1137 
1138   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
1139       REQUIRES(!*pending_task_lock_);
1140 
1141   void RequestConcurrentGCAndSaveObject(Thread* self,
1142                                         bool force_full,
1143                                         uint32_t observed_gc_num,
1144                                         ObjPtr<mirror::Object>* obj)
1145       REQUIRES_SHARED(Locks::mutator_lock_)
1146       REQUIRES(!*pending_task_lock_);
1147 
1148   static constexpr uint32_t GC_NUM_ANY = std::numeric_limits<uint32_t>::max();
1149 
1150   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
1151   // which type of Gc was actually run.
1152   // We pass in the intended GC sequence number to ensure that multiple approximately concurrent
1153   // requests result in a single GC; clearly redundant requests will be pruned.  A requested_gc_num
1154   // of GC_NUM_ANY indicates that we should not prune redundant requests.  (In the unlikely case
1155   // that gcs_completed_ gets this big, we just accept a potential extra GC or two.)
1156   collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
1157                                            GcCause gc_cause,
1158                                            bool clear_soft_references,
1159                                            uint32_t requested_gc_num)
1160       REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
1161                !*pending_task_lock_, !process_state_update_lock_);
1162 
1163   void PreGcVerification(collector::GarbageCollector* gc)
1164       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
1165   void PreGcVerificationPaused(collector::GarbageCollector* gc)
1166       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
1167   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
1168       REQUIRES(Locks::mutator_lock_);
1169   void PreSweepingGcVerification(collector::GarbageCollector* gc)
1170       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1171   void PostGcVerification(collector::GarbageCollector* gc)
1172       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
1173   void PostGcVerificationPaused(collector::GarbageCollector* gc)
1174       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
1175 
1176   // Find a collector based on GC type.
1177   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
1178 
1179   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
1180   void CreateMainMallocSpace(MemMap&& mem_map,
1181                              size_t initial_size,
1182                              size_t growth_limit,
1183                              size_t capacity);
1184 
1185   // Create a malloc space based on a mem map. Does not set the space as default.
1186   space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
1187                                                   size_t initial_size,
1188                                                   size_t growth_limit,
1189                                                   size_t capacity,
1190                                                   const char* name,
1191                                                   bool can_move_objects);
1192 
1193   // Given the current contents of the alloc space, increase the allowed heap footprint to match
1194   // the target utilization ratio.  This should only be called immediately after a full garbage
1195   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period during which
1196   // the GC was run.
1197   // This is only called by the thread that set collector_type_running_ to a value other than
1198   // kCollectorTypeNone, or while holding gc_complete_lock, and ensuring that
1199   // collector_type_running_ is kCollectorTypeNone.
1200   void GrowForUtilization(collector::GarbageCollector* collector_ran,
1201                           size_t bytes_allocated_before_gc = 0)
1202       REQUIRES(!process_state_update_lock_);
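  // Minimal sketch of the sizing math this implies (assuming the usual "grow to hit the target
  // utilization, bounded by min_free_/max_free_" policy; the authoritative computation is in the
  // implementation and may differ in detail):
  //
  //   size_t bytes_allocated = GetBytesAllocated();
  //   size_t target = static_cast<size_t>(bytes_allocated / target_utilization_);
  //   target = std::clamp(target, bytes_allocated + min_free_, bytes_allocated + max_free_);
  //   // e.g. 40 MiB live at a 0.75 target utilization suggests a ~53 MiB footprint.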
1203 
1204   size_t GetPercentFree();
1205 
1206   // Swap the allocation stack with the live stack.
1207   void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
1208 
1209   // Clear cards and update the mod-union table. If process_alloc_space_cards is false, the
1210   // alloc space is not processed at all; otherwise, if clear_alloc_space_cards is true, its
1211   // cards are cleared rather than aged.
1212   void ProcessCards(TimingLogger* timings,
1213                     bool use_rem_sets,
1214                     bool process_alloc_space_cards,
1215                     bool clear_alloc_space_cards)
1216       REQUIRES_SHARED(Locks::mutator_lock_);
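  // Non-normative example call (the TimingLogger and flag values are purely illustrative):
  //
  //   ProcessCards(timings, /*use_rem_sets=*/ true, /*process_alloc_space_cards=*/ true,
  //                /*clear_alloc_space_cards=*/ false);  // age, rather than clear, alloc-space cards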
1217 
1218   // Push an object onto the allocation stack.
1219   void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
1220       REQUIRES_SHARED(Locks::mutator_lock_)
1221       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1222   void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
1223       REQUIRES_SHARED(Locks::mutator_lock_)
1224       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1225   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
1226       REQUIRES_SHARED(Locks::mutator_lock_)
1227       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1228 
1229   void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
1230   void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
1231 
1232   // Does the current collector type run concurrently with the mutator? Currently true for the
1233   // CC, CMC, CMS, and background-CC collectors, false for other GC types.
1234   bool IsGcConcurrent() const ALWAYS_INLINE {
1235     return collector_type_ == kCollectorTypeCC ||
1236         collector_type_ == kCollectorTypeCMC ||
1237         collector_type_ == kCollectorTypeCMS ||
1238         collector_type_ == kCollectorTypeCCBackground;
1239   }
1240 
1241   // Trim the managed and native spaces by releasing unused memory back to the OS.
1242   void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
1243 
1244   // Trim unused pages at the end of reference tables.
1245   void TrimIndirectReferenceTables(Thread* self);
1246 
1247   template <typename Visitor>
1248   ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
1249       REQUIRES_SHARED(Locks::mutator_lock_)
1250       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1251   template <typename Visitor>
1252   ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
1253       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1254 
1255   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
1256 
1257   // GC stress mode attempts to do one GC per unique backtrace.
1258   void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
1259       REQUIRES_SHARED(Locks::mutator_lock_)
1260       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
1261                !*backtrace_lock_, !process_state_update_lock_);
1262 
1263   collector::GcType NonStickyGcType() const {
1264     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
1265   }
1266 
1267   // Return the amount of space we allow for native memory when deciding whether to
1268   // collect. We collect when a weighted sum of Java memory plus native memory exceeds
1269   // the similarly weighted sum of the Java heap size target and this value.
1270   ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
1271     // We keep the traditional limit of max_free_ in place for small heaps,
1272     // but allow it to be adjusted upward for large heaps to limit GC overhead.
1273     return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
1274   }
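  // Illustrative arithmetic only (numbers are hypothetical): with a 64 MiB target footprint and an
  // 8 MiB max_free_, the native watermark is 64 MiB / 8 + 8 MiB = 16 MiB.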
1275 
1276   ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
1277 
1278   // On switching app from background to foreground, grow the heap size
1279   // to incorporate foreground heap growth multiplier.
1280   void GrowHeapOnJankPerceptibleSwitch() REQUIRES(!process_state_update_lock_);
1281 
1282   // Update *_freed_ever_ counters to reflect current GC values.
1283   void IncrementFreedEver();
1284 
1285   // Remove a vlog code from heap-inl.h which is transitively included in half the world.
1286   static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
1287 
1288   // Return our best approximation of the number of bytes of native memory that
1289   // are currently in use, and could possibly be reclaimed as an indirect result
1290   // of a garbage collection.
1291   size_t GetNativeBytes();
1292 
1293   // Set concurrent_start_bytes_ to a reasonable guess, given target_footprint_.
1294   void SetDefaultConcurrentStartBytes() REQUIRES(!*gc_complete_lock_);
1295   // This version assumes no concurrent updaters.
1296   void SetDefaultConcurrentStartBytesLocked();
1297 
1298   // All-known continuous spaces, where objects lie within fixed bounds.
1299   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1300 
1301   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
1302   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1303 
1304   // All-known alloc spaces, where objects may be or have been allocated.
1305   std::vector<space::AllocSpace*> alloc_spaces_;
1306 
1307   // A space where non-movable objects are allocated. When compaction is enabled, it contains
1308   // Classes, ArtMethods, ArtFields, and other non-moving objects.
1309   space::MallocSpace* non_moving_space_;
1310 
1311   // Space which we use for the kAllocatorTypeROSAlloc.
1312   space::RosAllocSpace* rosalloc_space_;
1313 
1314   // Space which we use for the kAllocatorTypeDlMalloc.
1315   space::DlMallocSpace* dlmalloc_space_;
1316 
1317   // The main space is the space which the GC copies to and from on process state updates. This
1318   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
1319   space::MallocSpace* main_space_;
1320 
1321   // The large object space we are currently allocating into.
1322   space::LargeObjectSpace* large_object_space_;
1323 
1324   // The card table, dirtied by the write barrier.
1325   std::unique_ptr<accounting::CardTable> card_table_;
1326 
1327   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
1328 
1329   // A mod-union table remembers all of the references from its space to other spaces.
1330   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
1331       mod_union_tables_;
1332 
1333   // A remembered set remembers all of the references from its space to the target space.
1334   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
1335       remembered_sets_;
1336 
1337   // The current collector type.
1338   CollectorType collector_type_;
1339   // Which collector we use when the app is in the foreground.
1340   const CollectorType foreground_collector_type_;
1341   // Which collector we will use when the app is notified of a transition to background.
1342   CollectorType background_collector_type_;
1343   // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
1344   CollectorType desired_collector_type_;
1345 
1346   // Lock which guards pending tasks.
1347   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1348 
1349   // How many GC threads we may use for paused parts of garbage collection.
1350   const size_t parallel_gc_threads_;
1351 
1352   // How many GC threads we may use for unpaused parts of garbage collection.
1353   const size_t conc_gc_threads_;
1354 
1355   // Boolean for if we are in low memory mode.
1356   const bool low_memory_mode_;
1357 
1358   // If we get a pause longer than long pause log threshold, then we print out the GC after it
1359   // finishes.
1360   const size_t long_pause_log_threshold_;
1361 
1362   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
1363   const size_t long_gc_log_threshold_;
1364 
1365   // Starting time of the new process; meant to be used for measuring total process CPU time.
1366   uint64_t process_cpu_start_time_ns_;
1367 
1368   // Last time (before and after) GC started; meant to be used to measure the
1369   // duration between two GCs.
1370   uint64_t pre_gc_last_process_cpu_time_ns_;
1371   uint64_t post_gc_last_process_cpu_time_ns_;
1372 
1373   // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
1374   double pre_gc_weighted_allocated_bytes_;
1375   double post_gc_weighted_allocated_bytes_;
1376 
1377   // If we ignore the target footprint, the heap is allowed to grow until it hits the heap
1378   // capacity. This is useful for benchmarking since it reduces time spent in GC to a low %.
1379   const bool ignore_target_footprint_;
1380 
1381   // If we are running tests or some other configurations we might not actually
1382   // want logs for explicit gcs since they can get spammy.
1383   const bool always_log_explicit_gcs_;
1384 
1385   // Lock which guards zygote space creation.
1386   Mutex zygote_creation_lock_;
1387 
1388   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
1389   // zygote space creation.
1390   space::ZygoteSpace* zygote_space_;
1391 
1392   // Minimum allocation size of large object.
1393   size_t large_object_threshold_;
1394 
1395   // Guards access to the state of GC, associated conditional variable is used to signal when a GC
1396   // completes.
1397   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1398   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
1399 
1400   // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
1401   Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1402   std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
1403   // This counter keeps track of how many threads are currently in a JNI critical section. This is
1404   // incremented once per thread even with nested enters.
1405   size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
1406   bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
1407 
1408   // Reference processor.
1409   std::unique_ptr<ReferenceProcessor> reference_processor_;
1410 
1411   // Task processor, proxies heap trim requests to the daemon threads.
1412   std::unique_ptr<TaskProcessor> task_processor_;
1413 
1414   // The following are declared volatile only for debugging purposes; it shouldn't otherwise
1415   // matter.
1416 
1417   // Collector type of the running GC.
1418   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
1419 
1420   // Cause of the last running GC.
1421   volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
1422 
1423   // The thread currently running the GC.
1424   volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
1425 
1426   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
1427   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
1428   collector::GcType next_gc_type_;
1429 
1430   // Maximum size that the heap can reach.
1431   size_t capacity_;
1432 
1433   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
1434   // programs it is "cleared" making it the same as capacity.
1435   // Only weakly enforced for simultaneous allocations.
1436   size_t growth_limit_;
1437 
1438   // Requested initial heap size. Temporarily ignored after a fork, but then reestablished after
1439   // a while to usually trigger the initial GC.
1440   size_t initial_heap_size_;
1441 
1442   // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
1443   // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
1444   // concurrent GC case. Updates normally occur while collector_type_running_ is not none.
1445   Atomic<size_t> target_footprint_;
1446 
1447   Mutex process_state_update_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1448 
1449   // Computed with foreground-multiplier in GrowForUtilization() when run in
1450   // jank non-perceptible state. On update to process state from background to
1451   // foreground we set target_footprint_ and concurrent_start_bytes_ to the corresponding value.
1452   size_t min_foreground_target_footprint_ GUARDED_BY(process_state_update_lock_);
1453   size_t min_foreground_concurrent_start_bytes_ GUARDED_BY(process_state_update_lock_);
1454 
1455   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1456   // it completes ahead of an allocation failing.
1457   // A multiple of this is also used to determine when to trigger a GC in response to native
1458   // allocation.
1459   // After initialization, this is only updated by the thread that set collector_type_running_ to
1460   // a value other than kCollectorTypeNone, or while holding gc_complete_lock, and ensuring that
1461   // collector_type_running_ is kCollectorTypeNone.
1462   size_t concurrent_start_bytes_;
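  // Non-normative sketch of the check this threshold is meant for (the real fast-path check and the
  // request helper live elsewhere in the runtime; the call shown is approximate):
  //
  //   if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) >= concurrent_start_bytes_)) {
  //     RequestConcurrentGC(self, kGcCauseBackground, /*force_full=*/ false, observed_gc_num);
  //   }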
1463 
1464   // Since the heap was created, how many bytes have been freed.
1465   std::atomic<uint64_t> total_bytes_freed_ever_;
1466 
1467   // Since the heap was created, how many objects have been freed.
1468   std::atomic<uint64_t> total_objects_freed_ever_;
1469 
1470   // Number of bytes currently allocated and not yet reclaimed. Includes active
1471   // TLABS in their entirety, even if they have not yet been parceled out.
1472   Atomic<size_t> num_bytes_allocated_;
1473 
1474   // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
1475   // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
1476   // not include bytes allocated through the system malloc, since those are implicitly included.
1477   Atomic<size_t> native_bytes_registered_;
1478 
1479   // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
1480   Atomic<size_t> old_native_bytes_allocated_;
1481 
1482   // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
1483   // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
1484   Atomic<uint32_t> native_objects_notified_;
1485 
1486   // Number of bytes freed by thread local buffer revokes. This will
1487   // cancel out the ahead-of-time bulk counting of bytes allocated in
1488   // rosalloc thread-local buffers.  It is temporarily accumulated
1489   // here to be subtracted from num_bytes_allocated_ later at the next
1490   // GC.
1491   Atomic<size_t> num_bytes_freed_revoke_;
1492 
1493   // Records the number of bytes allocated at the time of GC, which is used later to calculate
1494   // how many bytes have been allocated since the last GC.
1495   size_t num_bytes_alive_after_gc_;
1496 
1497   // Info related to the current or previous GC iteration.
1498   collector::Iteration current_gc_iteration_;
1499 
1500   // Heap verification flags.
1501   const bool verify_missing_card_marks_;
1502   const bool verify_system_weaks_;
1503   const bool verify_pre_gc_heap_;
1504   const bool verify_pre_sweeping_heap_;
1505   const bool verify_post_gc_heap_;
1506   const bool verify_mod_union_table_;
1507   bool verify_pre_gc_rosalloc_;
1508   bool verify_pre_sweeping_rosalloc_;
1509   bool verify_post_gc_rosalloc_;
1510   const bool gc_stress_mode_;
1511 
1512   // RAII that temporarily disables the rosalloc verification during
1513   // the zygote fork.
1514   class ScopedDisableRosAllocVerification {
1515    private:
1516     Heap* const heap_;
1517     const bool orig_verify_pre_gc_;
1518     const bool orig_verify_pre_sweeping_;
1519     const bool orig_verify_post_gc_;
1520 
1521    public:
1522     explicit ScopedDisableRosAllocVerification(Heap* heap)
1523         : heap_(heap),
1524           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
1525           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
1526           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
1527       heap_->verify_pre_gc_rosalloc_ = false;
1528       heap_->verify_pre_sweeping_rosalloc_ = false;
1529       heap_->verify_post_gc_rosalloc_ = false;
1530     }
1531     ~ScopedDisableRosAllocVerification() {
1532       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
1533       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
1534       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
1535     }
1536   };
1537 
1538   // Parallel GC data structures.
1539   std::unique_ptr<ThreadPool> thread_pool_;
1540 
1541   // A bitmap that is set corresponding to the known live objects since the last GC cycle.
1542   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1543   // A bitmap that is set corresponding to the marked objects in the current GC cycle.
1544   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1545 
1546   // Mark stack that we reuse to avoid re-allocating the mark stack.
1547   std::unique_ptr<accounting::ObjectStack> mark_stack_;
1548 
1549   // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
1550   // to use the live bitmap as the old mark bitmap.
1551   const size_t max_allocation_stack_size_;
1552   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
1553 
1554   // Second allocation stack so that we can process allocation with the heap unlocked.
1555   std::unique_ptr<accounting::ObjectStack> live_stack_;
1556 
1557   // Allocator type.
1558   AllocatorType current_allocator_;
1559   const AllocatorType current_non_moving_allocator_;
1560 
1561   // Which GCs we run in order when an allocation fails.
1562   std::vector<collector::GcType> gc_plan_;
1563 
1564   // Bump pointer spaces.
1565   space::BumpPointerSpace* bump_pointer_space_;
1566   // Temp space is the space which the semispace collector copies to.
1567   space::BumpPointerSpace* temp_space_;
1568 
1569   // Region space, used by the concurrent collector.
1570   space::RegionSpace* region_space_;
1571 
1572   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
1573   // utilization, regardless of target utilization ratio.
1574   const size_t min_free_;
1575 
1576   // The ideal maximum free size, when we grow the heap for utilization.
1577   const size_t max_free_;
1578 
1579   // Target ideal heap utilization ratio.
1580   double target_utilization_;
1581 
1582   // How much more we grow the heap when we are a foreground app instead of background.
1583   double foreground_heap_growth_multiplier_;
1584 
1585   // The amount of native memory allocation since the last GC required to cause us to wait for a
1586   // collection as a result of native allocation. Very large values can cause the device to run
1587   // out of memory, due to lack of finalization to reclaim native memory.  Making it too small can
1588   // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
1589   // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
1590   const size_t stop_for_native_allocs_;
1591 
1592   // Total time which mutators are paused or waiting for GC to complete.
1593   uint64_t total_wait_time_;
1594 
1595   // The current state of heap verification, may be enabled or disabled.
1596   VerifyObjectMode verify_object_mode_;
1597 
1598   // Compacting GC disable count, prevents compacting GC from running iff > 0.
1599   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1600 
1601   std::vector<collector::GarbageCollector*> garbage_collectors_;
1602   collector::SemiSpace* semi_space_collector_;
1603   collector::MarkCompact* mark_compact_;
1604   Atomic<collector::ConcurrentCopying*> active_concurrent_copying_collector_;
1605   collector::ConcurrentCopying* young_concurrent_copying_collector_;
1606   collector::ConcurrentCopying* concurrent_copying_collector_;
1607 
1608   const bool is_running_on_memory_tool_;
1609   const bool use_tlab_;
1610 
1611   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1612   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1613   std::unique_ptr<space::MallocSpace> main_space_backup_;
1614 
1615   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1616   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1617 
1618   // Times of the last homogeneous space compaction caused by OOM.
1619   uint64_t last_time_homogeneous_space_compaction_by_oom_;
1620 
1621   // Saved OOMs by homogeneous space compaction.
1622   Atomic<size_t> count_delayed_oom_;
1623 
1624   // Count for requested homogeneous space compaction.
1625   Atomic<size_t> count_requested_homogeneous_space_compaction_;
1626 
1627   // Count for ignored homogeneous space compaction.
1628   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1629 
1630   // Count for performed homogeneous space compaction.
1631   Atomic<size_t> count_performed_homogeneous_space_compaction_;
1632 
1633   // The number of garbage collections (either young or full, not trims or the like) we have
1634   // completed since heap creation. We include requests that turned out to be impossible
1635   // because they were disabled. We guard against wrapping, though that's unlikely.
1636   // Increment is guarded by gc_complete_lock_.
1637   Atomic<uint32_t> gcs_completed_;
1638 
1639   // The number of the last garbage collection that has been requested.  A value of gcs_completed_
1640   // + 1 indicates that another collection is needed or in progress. A value of gcs_completed_ or
1641   // (logically) less means that no new GC has been requested.
1642   Atomic<uint32_t> max_gc_requested_;
1643 
1644   // Active tasks which we can modify (change target time, desired collector type, etc..).
1645   CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
1646   HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
1647 
1648   // Whether or not we use homogeneous space compaction to avoid OOM errors.
1649   bool use_homogeneous_space_compaction_for_oom_;
1650 
1651   // If true, enable generational collection when using the Concurrent Copying
1652   // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
1653   // for major collections. Set in Heap constructor.
1654   const bool use_generational_cc_;
1655 
1656   // True if the currently running collection has made some thread wait.
1657   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
1658   // The number of blocking GC runs.
1659   uint64_t blocking_gc_count_;
1660   // The total duration of blocking GC runs.
1661   uint64_t blocking_gc_time_;
1662   // The duration of the window for the GC count rate histograms.
1663   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
1664   // Maximum number of missed histogram windows for which statistics will be collected.
1665   static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
1666   // The last time when the GC count rate histograms were updated.
1667   // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
1668   uint64_t last_update_time_gc_count_rate_histograms_;
1669   // The running count of GC runs in the last window.
1670   uint64_t gc_count_last_window_;
1671   // The running count of blocking GC runs in the last window.
1672   uint64_t blocking_gc_count_last_window_;
1673   // The maximum number of buckets in the GC count rate histograms.
1674   static constexpr size_t kGcCountRateMaxBucketCount = 200;
1675   // The histogram of the number of GC invocations per window duration.
1676   Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1677   // The histogram of the number of blocking GC invocations per window duration.
1678   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1679 
1680   // Allocation tracking support
1681   Atomic<bool> alloc_tracking_enabled_;
1682   std::unique_ptr<AllocRecordObjectMap> allocation_records_;
1683   size_t alloc_record_depth_;
1684 
1685   // Perfetto Java Heap Profiler support.
1686   HeapSampler heap_sampler_;
1687 
1688   // GC stress related data structures.
1689   Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1690   // Debugging variables, seen backtraces vs unique backtraces.
1691   Atomic<uint64_t> seen_backtrace_count_;
1692   Atomic<uint64_t> unique_backtrace_count_;
1693   // Stack trace hashes that we have already seen.
1694   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
1695 
1696   // We disable GC when we are shutting down the runtime in case there are daemon threads still
1697   // allocating.
1698   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
1699 
1700   // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
1701   // emit region info before and after each GC cycle.
1702   bool dump_region_info_before_gc_;
1703   bool dump_region_info_after_gc_;
1704 
1705   // Boot image spaces.
1706   std::vector<space::ImageSpace*> boot_image_spaces_;
1707 
1708   // Boot image address range. Includes images and oat files.
1709   uint32_t boot_images_start_address_;
1710   uint32_t boot_images_size_;
1711 
1712   // The number of times we initiated a GC of last resort to try to avoid an OOME.
1713   Atomic<uint64_t> pre_oome_gc_count_;
1714 
1715   // An installed allocation listener.
1716   Atomic<AllocationListener*> alloc_listener_;
1717   // An installed GC Pause listener.
1718   Atomic<GcPauseListener*> gc_pause_listener_;
1719 
1720   std::unique_ptr<Verification> verification_;
1721 
1722   friend class CollectorTransitionTask;
1723   friend class collector::GarbageCollector;
1724   friend class collector::ConcurrentCopying;
1725   friend class collector::MarkCompact;
1726   friend class collector::MarkSweep;
1727   friend class collector::SemiSpace;
1728   friend class GCCriticalSection;
1729   friend class ReferenceQueue;
1730   friend class ScopedGCCriticalSection;
1731   friend class ScopedInterruptibleGCCriticalSection;
1732   friend class VerifyReferenceCardVisitor;
1733   friend class VerifyReferenceVisitor;
1734   friend class VerifyObjectVisitor;
1735 
1736   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1737 };
1738 
1739 }  // namespace gc
1740 }  // namespace art
1741 
1742 #endif  // ART_RUNTIME_GC_HEAP_H_
1743