1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_HEAP_H_
18 #define ART_RUNTIME_GC_HEAP_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <unordered_set>
23 #include <vector>
24 
25 #include <android-base/logging.h>
26 
27 #include "allocator_type.h"
28 #include "base/atomic.h"
29 #include "base/histogram.h"
30 #include "base/macros.h"
31 #include "base/mutex.h"
32 #include "base/runtime_debug.h"
33 #include "base/safe_map.h"
34 #include "base/time_utils.h"
35 #include "gc/collector/gc_type.h"
36 #include "gc/collector/iteration.h"
37 #include "gc/collector_type.h"
38 #include "gc/gc_cause.h"
39 #include "gc/space/large_object_space.h"
40 #include "handle.h"
41 #include "obj_ptr.h"
42 #include "offsets.h"
43 #include "process_state.h"
44 #include "read_barrier_config.h"
45 #include "runtime_globals.h"
46 #include "verify_object.h"
47 
48 namespace art {
49 
50 class ConditionVariable;
51 enum class InstructionSet;
52 class IsMarkedVisitor;
53 class Mutex;
54 class ReflectiveValueVisitor;
55 class RootVisitor;
56 class StackVisitor;
57 class Thread;
58 class ThreadPool;
59 class TimingLogger;
60 class VariableSizedHandleScope;
61 
62 namespace mirror {
63 class Class;
64 class Object;
65 }  // namespace mirror
66 
67 namespace gc {
68 
69 class AllocationListener;
70 class AllocRecordObjectMap;
71 class GcPauseListener;
72 class HeapTask;
73 class ReferenceProcessor;
74 class TaskProcessor;
75 class Verification;
76 
77 namespace accounting {
78 template <typename T> class AtomicStack;
79 using ObjectStack = AtomicStack<mirror::Object>;
80 class CardTable;
81 class HeapBitmap;
82 class ModUnionTable;
83 class ReadBarrierTable;
84 class RememberedSet;
85 }  // namespace accounting
86 
87 namespace collector {
88 class ConcurrentCopying;
89 class GarbageCollector;
90 class MarkSweep;
91 class SemiSpace;
92 }  // namespace collector
93 
94 namespace allocator {
95 class RosAlloc;
96 }  // namespace allocator
97 
98 namespace space {
99 class AllocSpace;
100 class BumpPointerSpace;
101 class ContinuousMemMapAllocSpace;
102 class DiscontinuousSpace;
103 class DlMallocSpace;
104 class ImageSpace;
105 class LargeObjectSpace;
106 class MallocSpace;
107 class RegionSpace;
108 class RosAllocSpace;
109 class Space;
110 class ZygoteSpace;
111 }  // namespace space
112 
113 enum HomogeneousSpaceCompactResult {
114   // Success.
115   kSuccess,
116   // Reject due to disabled moving GC.
117   kErrorReject,
118   // Unsupported due to the current configuration.
119   kErrorUnsupported,
120   // System is shutting down.
121   kErrorVMShuttingDown,
122 };
123 
124 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
125 static constexpr bool kUseRosAlloc = true;
126 
127 // If true, use thread-local allocation stack.
128 static constexpr bool kUseThreadLocalAllocationStack = true;
129 
130 class Heap {
131  public:
132   // How much we grow the TLAB if we can do it.
133   static constexpr size_t kPartialTlabSize = 16 * KB;
134   static constexpr bool kUsePartialTlabs = true;
135 
136   static constexpr size_t kDefaultStartingSize = kPageSize;
137   static constexpr size_t kDefaultInitialSize = 2 * MB;
138   static constexpr size_t kDefaultMaximumSize = 256 * MB;
139   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
140   static constexpr size_t kDefaultMaxFree = 2 * MB;
141   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
142   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
143   static constexpr size_t kDefaultLongPauseLogThresholdGcStress = MsToNs(50);
144   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
145   static constexpr size_t kDefaultLongGCLogThresholdGcStress = MsToNs(1000);
146   static constexpr size_t kDefaultTLABSize = 32 * KB;
147   static constexpr double kDefaultTargetUtilization = 0.75;
148   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
149   // Primitive arrays larger than this size are put in the large object space.
150   static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
151   static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
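  // Worked example (illustrative, assuming the common 4 KiB page size): kMinLargeObjectThreshold
  // is 3 * 4 KiB = 12 KiB, so a primitive array is only placed in the large object space once its
  // allocation size reaches roughly 12 KiB (e.g. an int[] of about 3000 elements or more).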
152   // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
153   static constexpr bool kDefaultEnableParallelGC = false;
154   static uint8_t* const kPreferredAllocSpaceBegin;
155 
156   // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
157   // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
158   static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
159       USE_ART_LOW_4G_ALLOCATOR ?
160           space::LargeObjectSpaceType::kFreeList
161         : space::LargeObjectSpaceType::kMap;
162 
163   // Used so that we don't overflow the allocation time atomic integer.
164   static constexpr size_t kTimeAdjust = 1024;
165 
166   // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
167   // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
168   // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec
169   // on Android.
170 #ifdef __ANDROID__
171   static constexpr uint32_t kNotifyNativeInterval = 64;
172 #else
173   // Some host mallinfo() implementations are slow. And memory is less scarce.
174   static constexpr uint32_t kNotifyNativeInterval = 384;
175 #endif
176 
177   // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
178   // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
179   // make it safe to allocate that many bytes between checks.
180   static constexpr size_t kCheckImmediatelyThreshold = 300000;
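  // Worked example with the defaults above: on Android, up to kNotifyNativeInterval = 64
  // allocations of just under kCheckImmediatelyThreshold = 300000 bytes each, i.e. roughly 19 MB,
  // can accumulate between checks; any single allocation above the threshold is checked
  // immediately by RegisterNativeAllocation.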
181 
182   // How often we allow heap trimming to happen (nanoseconds).
183   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
184   // How long we wait after a transition request to perform a collector transition (nanoseconds).
185   static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
186   // Whether the transition-wait applies or not. Zero wait will stress the
187   // transition code and collector, but increases jank probability.
188   DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);
189 
190   // Create a heap with the requested sizes. The possibly empty
191   // image_file_names specify Spaces to load based on
192   // ImageWriter output.
193   Heap(size_t initial_size,
194        size_t growth_limit,
195        size_t min_free,
196        size_t max_free,
197        double target_utilization,
198        double foreground_heap_growth_multiplier,
199        size_t stop_for_native_allocs,
200        size_t capacity,
201        size_t non_moving_space_capacity,
202        const std::vector<std::string>& boot_class_path,
203        const std::vector<std::string>& boot_class_path_locations,
204        const std::vector<int>& boot_class_path_fds,
205        const std::vector<int>& boot_class_path_image_fds,
206        const std::vector<int>& boot_class_path_vdex_fds,
207        const std::vector<int>& boot_class_path_oat_fds,
208        const std::vector<std::string>& image_file_names,
209        InstructionSet image_instruction_set,
210        CollectorType foreground_collector_type,
211        CollectorType background_collector_type,
212        space::LargeObjectSpaceType large_object_space_type,
213        size_t large_object_threshold,
214        size_t parallel_gc_threads,
215        size_t conc_gc_threads,
216        bool low_memory_mode,
217        size_t long_pause_threshold,
218        size_t long_gc_threshold,
219        bool ignore_target_footprint,
220        bool always_log_explicit_gcs,
221        bool use_tlab,
222        bool verify_pre_gc_heap,
223        bool verify_pre_sweeping_heap,
224        bool verify_post_gc_heap,
225        bool verify_pre_gc_rosalloc,
226        bool verify_pre_sweeping_rosalloc,
227        bool verify_post_gc_rosalloc,
228        bool gc_stress_mode,
229        bool measure_gc_performance,
230        bool use_homogeneous_space_compaction,
231        bool use_generational_cc,
232        uint64_t min_interval_homogeneous_space_compaction_by_oom,
233        bool dump_region_info_before_gc,
234        bool dump_region_info_after_gc);
235 
236   ~Heap();
237 
238   // Allocates and initializes storage for an object instance.
239   template <bool kInstrumented = true, typename PreFenceVisitor>
240   mirror::Object* AllocObject(Thread* self,
241                               ObjPtr<mirror::Class> klass,
242                               size_t num_bytes,
243                               const PreFenceVisitor& pre_fence_visitor)
244       REQUIRES_SHARED(Locks::mutator_lock_)
245       REQUIRES(!*gc_complete_lock_,
246                !*pending_task_lock_,
247                !*backtrace_lock_,
248                !process_state_update_lock_,
249                !Roles::uninterruptible_) {
250     return AllocObjectWithAllocator<kInstrumented>(self,
251                                                    klass,
252                                                    num_bytes,
253                                                    GetCurrentAllocator(),
254                                                    pre_fence_visitor);
255   }
256 
257   template <bool kInstrumented = true, typename PreFenceVisitor>
258   mirror::Object* AllocNonMovableObject(Thread* self,
259                                         ObjPtr<mirror::Class> klass,
260                                         size_t num_bytes,
261                                         const PreFenceVisitor& pre_fence_visitor)
262       REQUIRES_SHARED(Locks::mutator_lock_)
263       REQUIRES(!*gc_complete_lock_,
264                !*pending_task_lock_,
265                !*backtrace_lock_,
266                !process_state_update_lock_,
267                !Roles::uninterruptible_) {
268     mirror::Object* obj = AllocObjectWithAllocator<kInstrumented>(self,
269                                                                   klass,
270                                                                   num_bytes,
271                                                                   GetCurrentNonMovingAllocator(),
272                                                                   pre_fence_visitor);
273     // Java Heap Profiler check and sample allocation.
274     JHPCheckNonTlabSampleAllocation(self, obj, num_bytes);
275     return obj;
276   }
277 
278   template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
279   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
280                                                          ObjPtr<mirror::Class> klass,
281                                                          size_t byte_count,
282                                                          AllocatorType allocator,
283                                                          const PreFenceVisitor& pre_fence_visitor)
284       REQUIRES_SHARED(Locks::mutator_lock_)
285       REQUIRES(!*gc_complete_lock_,
286                !*pending_task_lock_,
287                !*backtrace_lock_,
288                !process_state_update_lock_,
289                !Roles::uninterruptible_);
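  // Illustrative caller sketch (hypothetical names; a minimal example assuming the pre-fence
  // visitor is a callable taking the new object and its usable size): the visitor runs on the
  // freshly allocated object before it is published to other threads, so it is the place to set
  // fields that must be visible on first access.
  //
  //   auto set_length = [&](ObjPtr<mirror::Object> obj, size_t usable_size ATTRIBUTE_UNUSED)
  //       REQUIRES_SHARED(Locks::mutator_lock_) {
  //     obj->AsArray()->SetLength(component_count);  // hypothetical initialization
  //   };
  //   mirror::Object* array = heap->AllocObject(self, array_class, byte_count, set_length);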
290 
291   AllocatorType GetCurrentAllocator() const {
292     return current_allocator_;
293   }
294 
295   AllocatorType GetCurrentNonMovingAllocator() const {
296     return current_non_moving_allocator_;
297   }
298 
299   AllocatorType GetUpdatedAllocator(AllocatorType old_allocator) {
300     return (old_allocator == kAllocatorTypeNonMoving) ?
301         GetCurrentNonMovingAllocator() : GetCurrentAllocator();
302   }
303 
304   // Visit all of the live objects in the heap.
305   template <typename Visitor>
306   ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
307       REQUIRES_SHARED(Locks::mutator_lock_)
308       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
309   template <typename Visitor>
310   ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
311       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
312 
313   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
314       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
315 
316   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
317       REQUIRES_SHARED(Locks::mutator_lock_);
318 
319   // Inform the garbage collector of a non-malloc allocated native memory that might become
320   // reclaimable in the future as a result of Java garbage collection.
321   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
322       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
323   void RegisterNativeFree(JNIEnv* env, size_t bytes);
324 
325   // Notify the garbage collector of malloc allocations that might be reclaimable
326   // as a result of Java garbage collection. Each such call represents approximately
327   // kNotifyNativeInterval such allocations.
328   void NotifyNativeAllocations(JNIEnv* env)
329       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
330 
331   uint32_t GetNotifyNativeInterval() {
332     return kNotifyNativeInterval;
333   }
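  // Minimal sketch of the notification contract above (hypothetical client-side counter, not part
  // of this class): a client wrapping native allocations counts them and pings the heap every
  // kNotifyNativeInterval allocations, while non-malloc native memory is reported through
  // RegisterNativeAllocation / RegisterNativeFree directly.
  //
  //   static std::atomic<uint32_t> native_alloc_count{0};
  //   void* AllocNativeBuffer(JNIEnv* env, Heap* heap, size_t bytes) {
  //     void* p = malloc(bytes);
  //     if (native_alloc_count.fetch_add(1u) % heap->GetNotifyNativeInterval() == 0) {
  //       heap->NotifyNativeAllocations(env);  // lets the heap decide whether a GC is needed
  //     }
  //     return p;
  //   }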
334 
335   // Change the allocator, updates entrypoints.
336   void ChangeAllocator(AllocatorType allocator)
337       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
338 
339   // Change the collector to be one of the possible options (MS, CMS, SS). Only safe when no
340   // concurrent accesses to the heap are possible.
341   void ChangeCollector(CollectorType collector_type)
342       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
343 
344   // The given reference is believed to point to an object in the Java heap; check its soundness.
345   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
346   // proper lock ordering for it.
347   void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
348 
349   // Consistency check of all live references.
350   void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
351   // Returns how many failures occurred.
352   size_t VerifyHeapReferences(bool verify_referents = true)
353       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
354   bool VerifyMissingCardMarks()
355       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
356 
357   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
358   // and doesn't abort on error, allowing the caller to report more
359   // meaningful diagnostics.
360   bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
361 
362   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
363   // very slow.
364   bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
365       REQUIRES_SHARED(Locks::mutator_lock_);
366 
367   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
368   // Requires the heap lock to be held.
369   bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
370                           bool search_allocation_stack = true,
371                           bool search_live_stack = true,
372                           bool sorted = false)
373       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
374 
375   // Returns true if there is any chance that the object (obj) will move.
376   bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
377 
378   // Allows us to temporarily disable compacting (moving) GC until objects are released.
379   void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
380   void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
381 
382   // Temporarily disable thread flip for JNI critical calls.
383   void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
384   void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
385   void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
386   void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
387 
388   // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
389   // Mutator lock is required for GetContinuousSpaces.
390   void ClearMarkedObjects()
391       REQUIRES(Locks::heap_bitmap_lock_)
392       REQUIRES_SHARED(Locks::mutator_lock_);
393 
394   // Initiates an explicit garbage collection. Guarantees that a GC started after this call has
395   // completed.
396   void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
397       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
398 
399   // Does a concurrent GC, provided the GC numbered requested_gc_num has not already been
400   // completed. Should only be called by the GC daemon thread through runtime.
401   void ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num)
402       REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_,
403                !*pending_task_lock_, !process_state_update_lock_);
404 
405   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
406   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
407   void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
408                       bool use_is_assignable_from,
409                       uint64_t* counts)
410       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
411       REQUIRES_SHARED(Locks::mutator_lock_);
412 
413   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
414   // implement dalvik.system.VMRuntime.clearGrowthLimit.
415   void ClearGrowthLimit() REQUIRES(!*gc_complete_lock_);
416 
417   // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
418   // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
419   void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);
420 
421   // Target ideal heap utilization ratio, implements
422   // dalvik.system.VMRuntime.getTargetHeapUtilization.
423   double GetTargetHeapUtilization() const {
424     return target_utilization_;
425   }
426 
427   // Data structure memory usage tracking.
428   void RegisterGCAllocation(size_t bytes);
429   void RegisterGCDeAllocation(size_t bytes);
430 
431   // Set the heap's private space pointers to be the same as the space based on its type. Public
432   // due to usage by tests.
433   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
434       REQUIRES(!Locks::heap_bitmap_lock_);
435   void AddSpace(space::Space* space)
436       REQUIRES(!Locks::heap_bitmap_lock_)
437       REQUIRES(Locks::mutator_lock_);
438   void RemoveSpace(space::Space* space)
439     REQUIRES(!Locks::heap_bitmap_lock_)
440     REQUIRES(Locks::mutator_lock_);
441 
442   double GetPreGcWeightedAllocatedBytes() const {
443     return pre_gc_weighted_allocated_bytes_;
444   }
445 
446   double GetPostGcWeightedAllocatedBytes() const {
447     return post_gc_weighted_allocated_bytes_;
448   }
449 
450   void CalculatePreGcWeightedAllocatedBytes();
451   void CalculatePostGcWeightedAllocatedBytes();
452   uint64_t GetTotalGcCpuTime();
453 
454   uint64_t GetProcessCpuStartTime() const {
455     return process_cpu_start_time_ns_;
456   }
457 
458   uint64_t GetPostGCLastProcessCpuTime() const {
459     return post_gc_last_process_cpu_time_ns_;
460   }
461 
462   // Set target ideal heap utilization ratio, implements
463   // dalvik.system.VMRuntime.setTargetHeapUtilization.
464   void SetTargetHeapUtilization(float target);
465 
466   // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
467   // from the system. Doesn't allow the space to exceed its growth limit.
468   // Set while we hold gc_complete_lock or collector_type_running_ != kCollectorTypeNone.
469   void SetIdealFootprint(size_t max_allowed_footprint);
470 
471   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
472   // waited for. Only waits for running collections, ignoring a requested but unstarted GC. Only
473   // heuristic, since a new GC may have started by the time we return.
474   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
475 
476   // Update the heap's process state to a new value, may cause compaction to occur.
477   void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
478       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
479 
480   bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
481     // No lock since vector empty is thread safe.
482     return !continuous_spaces_.empty();
483   }
484 
485   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
486       REQUIRES_SHARED(Locks::mutator_lock_) {
487     return continuous_spaces_;
488   }
489 
490   const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
491     return discontinuous_spaces_;
492   }
493 
494   const collector::Iteration* GetCurrentGcIteration() const {
495     return &current_gc_iteration_;
496   }
497   collector::Iteration* GetCurrentGcIteration() {
498     return &current_gc_iteration_;
499   }
500 
501   // Enable verification of object references when the runtime is sufficiently initialized.
502   void EnableObjectValidation() {
503     verify_object_mode_ = kVerifyObjectSupport;
504     if (verify_object_mode_ > kVerifyObjectModeDisabled) {
505       VerifyHeap();
506     }
507   }
508 
509   // Disable object reference verification for image writing.
510   void DisableObjectValidation() {
511     verify_object_mode_ = kVerifyObjectModeDisabled;
512   }
513 
514   // Other checks may be performed if we know the heap should be in a healthy state.
515   bool IsObjectValidationEnabled() const {
516     return verify_object_mode_ > kVerifyObjectModeDisabled;
517   }
518 
519   // Returns true if low memory mode is enabled.
520   bool IsLowMemoryMode() const {
521     return low_memory_mode_;
522   }
523 
524   // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
525   // Scales heap growth, min free, and max free.
526   double HeapGrowthMultiplier() const;
527 
528   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
529   // free-list backed space.
530   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
531 
532   // Record the bytes freed by thread-local buffer revoke.
533   void RecordFreeRevoke();
534 
535   accounting::CardTable* GetCardTable() const {
536     return card_table_.get();
537   }
538 
539   accounting::ReadBarrierTable* GetReadBarrierTable() const {
540     return rb_table_.get();
541   }
542 
543   void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
544 
545   // Returns the number of bytes currently allocated.
546   // The result should be treated as an approximation, if it is being concurrently updated.
547   size_t GetBytesAllocated() const {
548     return num_bytes_allocated_.load(std::memory_order_relaxed);
549   }
550 
551   bool GetUseGenerationalCC() const {
552     return use_generational_cc_;
553   }
554 
555   // Returns the number of objects currently allocated.
556   size_t GetObjectsAllocated() const
557       REQUIRES(!Locks::heap_bitmap_lock_);
558 
559   // Returns the total number of objects allocated since the heap was created.
560   uint64_t GetObjectsAllocatedEver() const;
561 
562   // Returns the total number of bytes allocated since the heap was created.
563   uint64_t GetBytesAllocatedEver() const;
564 
565   // Returns the total number of objects freed since the heap was created.
566   // With default memory order, this should be viewed only as a hint.
567   uint64_t GetObjectsFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
568     return total_objects_freed_ever_.load(mo);
569   }
570 
571   // Returns the total number of bytes freed since the heap was created.
572   // With default memory order, this should be viewed only as a hint.
573   uint64_t GetBytesFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
574     return total_bytes_freed_ever_.load(mo);
575   }
576 
577   space::RegionSpace* GetRegionSpace() const {
578     return region_space_;
579   }
580 
581   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
582   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
583   // were specified. Android apps start with a growth limit (small heap size) which is
584   // cleared/extended for large apps.
585   size_t GetMaxMemory() const {
586     // There are some race conditions in the allocation code that can cause bytes allocated to
587     // become larger than growth_limit_ in rare cases.
588     return std::max(GetBytesAllocated(), growth_limit_);
589   }
590 
591   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
592   // consumed by an application.
593   size_t GetTotalMemory() const;
594 
595   // Returns approximately how much free memory we have until the next GC happens.
596   size_t GetFreeMemoryUntilGC() const {
597     return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
598                               GetBytesAllocated());
599   }
600 
601   // Returns approximately how much free memory we have until the next OOME happens.
602   size_t GetFreeMemoryUntilOOME() const {
603     return UnsignedDifference(growth_limit_, GetBytesAllocated());
604   }
605 
606   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
607   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
608   size_t GetFreeMemory() const {
609     return UnsignedDifference(GetTotalMemory(),
610                               num_bytes_allocated_.load(std::memory_order_relaxed));
611   }
612 
613   // Get the space that corresponds to an object's address. Current implementation searches all
614   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
615   // TODO: consider using faster data structure like binary tree.
616   space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
617       REQUIRES_SHARED(Locks::mutator_lock_);
618 
619   space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
620       REQUIRES_SHARED(Locks::mutator_lock_);
621 
622   space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
623                                                               bool fail_ok) const
624       REQUIRES_SHARED(Locks::mutator_lock_);
625 
626   space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
627       REQUIRES_SHARED(Locks::mutator_lock_);
628 
629   space::Space* FindSpaceFromAddress(const void* ptr) const
630       REQUIRES_SHARED(Locks::mutator_lock_);
631 
632   std::string DumpSpaceNameFromAddress(const void* addr) const
633       REQUIRES_SHARED(Locks::mutator_lock_);
634 
635   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
636 
637   // Do a pending collector transition.
638   void DoPendingCollectorTransition()
639       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
640 
641   // Deflate monitors, ... and trim the spaces.
642   void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
643 
644   void RevokeThreadLocalBuffers(Thread* thread);
645   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
646   void RevokeAllThreadLocalBuffers();
647   void AssertThreadLocalBuffersAreRevoked(Thread* thread);
648   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
649   void RosAllocVerification(TimingLogger* timings, const char* name)
650       REQUIRES(Locks::mutator_lock_);
651 
652   accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
653     return live_bitmap_.get();
654   }
655 
656   accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
657     return mark_bitmap_.get();
658   }
659 
660   accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
661     return live_stack_.get();
662   }
663 
664   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
665 
666   // Mark and empty stack.
667   void FlushAllocStack()
668       REQUIRES_SHARED(Locks::mutator_lock_)
669       REQUIRES(Locks::heap_bitmap_lock_);
670 
671   // Revoke all the thread-local allocation stacks.
672   void RevokeAllThreadLocalAllocationStacks(Thread* self)
673       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
674 
675   // Mark all the objects in the allocation stack in the specified bitmap.
676   // TODO: Refactor?
677   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
678                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
679                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
680                       accounting::ObjectStack* stack)
681       REQUIRES_SHARED(Locks::mutator_lock_)
682       REQUIRES(Locks::heap_bitmap_lock_);
683 
684   // Mark the specified allocation stack as live.
685   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
686       REQUIRES_SHARED(Locks::mutator_lock_)
687       REQUIRES(Locks::heap_bitmap_lock_);
688 
689   // Unbind any bound bitmaps.
690   void UnBindBitmaps()
691       REQUIRES(Locks::heap_bitmap_lock_)
692       REQUIRES_SHARED(Locks::mutator_lock_);
693 
694   // Returns the boot image spaces. There may be multiple boot image spaces.
695   const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
696     return boot_image_spaces_;
697   }
698 
699   bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
700       REQUIRES_SHARED(Locks::mutator_lock_);
701 
702   bool IsInBootImageOatFile(const void* p) const
703       REQUIRES_SHARED(Locks::mutator_lock_);
704 
705   // Get the start address of the boot images if any; otherwise returns 0.
706   uint32_t GetBootImagesStartAddress() const {
707     return boot_images_start_address_;
708   }
709 
710   // Get the size of all boot images, including the heap and oat areas.
711   uint32_t GetBootImagesSize() const {
712     return boot_images_size_;
713   }
714 
715   // Check if a pointer points to a boot image.
716   bool IsBootImageAddress(const void* p) const {
717     return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
718   }
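  // Why the single comparison above is enough: the subtraction is unsigned, so any pointer below
  // boot_images_start_address_ wraps around to a huge value and fails the `< boot_images_size_`
  // test. Worked example with hypothetical values (start 0x70000000, size 0x01000000):
  // 0x7000ABCD - 0x70000000 = 0x0000ABCD, inside; 0x6FFFFFFF - 0x70000000 wraps to 0xFFFFFFFF,
  // outside.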
719 
720   space::DlMallocSpace* GetDlMallocSpace() const {
721     return dlmalloc_space_;
722   }
723 
724   space::RosAllocSpace* GetRosAllocSpace() const {
725     return rosalloc_space_;
726   }
727 
728   // Return the corresponding rosalloc space.
729   space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
730       REQUIRES_SHARED(Locks::mutator_lock_);
731 
732   space::MallocSpace* GetNonMovingSpace() const {
733     return non_moving_space_;
734   }
735 
736   space::LargeObjectSpace* GetLargeObjectsSpace() const {
737     return large_object_space_;
738   }
739 
740   // Returns the free list space that may contain movable objects (the
741   // one that's not the non-moving space), either rosalloc_space_ or
742   // dlmalloc_space_.
743   space::MallocSpace* GetPrimaryFreeListSpace() {
744     if (kUseRosAlloc) {
745       DCHECK(rosalloc_space_ != nullptr);
746       // reinterpret_cast is necessary as the space class hierarchy
747       // isn't known (#included) yet here.
748       return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
749     } else {
750       DCHECK(dlmalloc_space_ != nullptr);
751       return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
752     }
753   }
754 
755   void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
756   std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
757 
758   // GC performance measuring
759   void DumpGcPerformanceInfo(std::ostream& os)
760       REQUIRES(!*gc_complete_lock_);
761   void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
762 
763   // Thread pool.
764   void CreateThreadPool();
765   void DeleteThreadPool();
766   ThreadPool* GetThreadPool() {
767     return thread_pool_.get();
768   }
769   size_t GetParallelGCThreadCount() const {
770     return parallel_gc_threads_;
771   }
772   size_t GetConcGCThreadCount() const {
773     return conc_gc_threads_;
774   }
775   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
776   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
777 
778   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
779   void AddRememberedSet(accounting::RememberedSet* remembered_set);
780   // Also deletes the remembered set.
781   void RemoveRememberedSet(space::Space* space);
782 
783   bool IsCompilingBoot() const;
784   bool HasBootImageSpace() const {
785     return !boot_image_spaces_.empty();
786   }
787 
788   ReferenceProcessor* GetReferenceProcessor() {
789     return reference_processor_.get();
790   }
791   TaskProcessor* GetTaskProcessor() {
792     return task_processor_.get();
793   }
794 
795   bool HasZygoteSpace() const {
796     return zygote_space_ != nullptr;
797   }
798 
799   // Returns the active concurrent copying collector.
800   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
801     collector::ConcurrentCopying* active_collector =
802             active_concurrent_copying_collector_.load(std::memory_order_relaxed);
803     if (use_generational_cc_) {
804       DCHECK((active_collector == concurrent_copying_collector_) ||
805              (active_collector == young_concurrent_copying_collector_))
806               << "active_concurrent_copying_collector: " << active_collector
807               << " young_concurrent_copying_collector: " << young_concurrent_copying_collector_
808               << " concurrent_copying_collector: " << concurrent_copying_collector_;
809     } else {
810       DCHECK_EQ(active_collector, concurrent_copying_collector_);
811     }
812     return active_collector;
813   }
814 
815   CollectorType CurrentCollectorType() {
816     return collector_type_;
817   }
818 
819   bool IsGcConcurrentAndMoving() const {
820     if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
821       // Assume no transition when a concurrent moving collector is used.
822       DCHECK_EQ(collector_type_, foreground_collector_type_);
823       return true;
824     }
825     return false;
826   }
827 
828   bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
829     MutexLock mu(self, *gc_complete_lock_);
830     return disable_moving_gc_count_ > 0;
831   }
832 
833   // Request an asynchronous trim.
834   void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
835 
836   // Retrieve the current GC number, i.e. the number n such that we completed n GCs so far.
837   // Provides acquire ordering, so that if we read this first, and then check whether a GC is
838   // required, we know that the GC number read actually preceded the test.
839   uint32_t GetCurrentGcNum() {
840     return gcs_completed_.load(std::memory_order_acquire);
841   }
842 
843   // Request asynchronous GC. Observed_gc_num is the value of GetCurrentGcNum() when we started to
844   // evaluate the GC triggering condition. If a GC has been completed since then, we consider our
845   // job done. If we return true, then we ensured that gcs_completed_ will eventually be
846   // incremented beyond observed_gc_num. We return false only in corner cases in which we cannot
847   // ensure that.
848   bool RequestConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t observed_gc_num)
849       REQUIRES(!*pending_task_lock_);
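  // Illustrative request sequence (hypothetical trigger condition): read the completed-GC count
  // before evaluating the triggering condition, then pass it back so that a GC which completed in
  // the meantime satisfies the request without forcing another collection.
  //
  //   uint32_t observed_gc_num = heap->GetCurrentGcNum();
  //   if (/* allocation-pressure heuristic */ true) {
  //     heap->RequestConcurrentGC(self, kGcCauseBackground, /*force_full=*/false, observed_gc_num);
  //   }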
850 
851   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
852   bool MayUseCollector(CollectorType type) const;
853 
854   // Used by tests to reduce timing-dependent flakiness in OOME behavior.
855   void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
856     min_interval_homogeneous_space_compaction_by_oom_ = interval;
857   }
858 
859   // Helpers for android.os.Debug.getRuntimeStat().
860   uint64_t GetGcCount() const;
861   uint64_t GetGcTime() const;
862   uint64_t GetBlockingGcCount() const;
863   uint64_t GetBlockingGcTime() const;
864   void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
865   void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
866   uint64_t GetTotalTimeWaitingForGC() const {
867     return total_wait_time_;
868   }
869   uint64_t GetPreOomeGcCount() const;
870 
871   // Perfetto Art Heap Profiler Support.
872   HeapSampler& GetHeapSampler() {
873     return heap_sampler_;
874   }
875 
876   void InitPerfettoJavaHeapProf();
877   int CheckPerfettoJHPEnabled();
878   // In NonTlab case: Check whether we should report a sample allocation and if so report it.
879   // Also update state (bytes_until_sample).
880   // By calling JHPCheckNonTlabSampleAllocation from different functions for Large allocations and
881   // non-moving allocations we are able to use the stack to identify these allocations separately.
882   void JHPCheckNonTlabSampleAllocation(Thread* self,
883                                        mirror::Object* ret,
884                                        size_t alloc_size);
885   // In Tlab case: Calculate the next tlab size (location of next sample point) and whether
886   // a sample should be taken.
887   size_t JHPCalculateNextTlabSize(Thread* self,
888                                   size_t jhp_def_tlab_size,
889                                   size_t alloc_size,
890                                   bool* take_sample,
891                                   size_t* bytes_until_sample);
892   // Reduce the number of bytes to the next sample position by this adjustment.
893   void AdjustSampleOffset(size_t adjustment);
894 
895   // Allocation tracking support
896   // Callers to this function use double-checked locking to ensure safety on allocation_records_
897   bool IsAllocTrackingEnabled() const {
898     return alloc_tracking_enabled_.load(std::memory_order_relaxed);
899   }
900 
901   void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
902     alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
903   }
904 
905   // Return the current stack depth of allocation records.
906   size_t GetAllocTrackerStackDepth() const {
907     return alloc_record_depth_;
908   }
909 
910   // Return the current stack depth of allocation records.
911   void SetAllocTrackerStackDepth(size_t alloc_record_depth) {
912     alloc_record_depth_ = alloc_record_depth;
913   }
914 
915   AllocRecordObjectMap* GetAllocationRecords() const REQUIRES(Locks::alloc_tracker_lock_) {
916     return allocation_records_.get();
917   }
918 
919   void SetAllocationRecords(AllocRecordObjectMap* records)
920       REQUIRES(Locks::alloc_tracker_lock_);
921 
922   void VisitAllocationRecords(RootVisitor* visitor) const
923       REQUIRES_SHARED(Locks::mutator_lock_)
924       REQUIRES(!Locks::alloc_tracker_lock_);
925 
926   void SweepAllocationRecords(IsMarkedVisitor* visitor) const
927       REQUIRES_SHARED(Locks::mutator_lock_)
928       REQUIRES(!Locks::alloc_tracker_lock_);
929 
930   void DisallowNewAllocationRecords() const
931       REQUIRES_SHARED(Locks::mutator_lock_)
932       REQUIRES(!Locks::alloc_tracker_lock_);
933 
934   void AllowNewAllocationRecords() const
935       REQUIRES_SHARED(Locks::mutator_lock_)
936       REQUIRES(!Locks::alloc_tracker_lock_);
937 
938   void BroadcastForNewAllocationRecords() const
939       REQUIRES(!Locks::alloc_tracker_lock_);
940 
941   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
942 
943   // Create a new alloc space and compact default alloc space to it.
944   HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
945       REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
946   bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
947 
948   // Install an allocation listener.
949   void SetAllocationListener(AllocationListener* l);
950   // Remove an allocation listener. Note: the listener must not be deleted, as for performance
951   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
952   void RemoveAllocationListener();
953 
954   // Install a gc pause listener.
955   void SetGcPauseListener(GcPauseListener* l);
956   // Get the currently installed gc pause listener, or null.
957   GcPauseListener* GetGcPauseListener() {
958     return gc_pause_listener_.load(std::memory_order_acquire);
959   }
960   // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
961   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
962   void RemoveGcPauseListener();
963 
964   const Verification* GetVerification() const;
965 
966   void PostForkChildAction(Thread* self) REQUIRES(!*gc_complete_lock_);
967 
968   void TraceHeapSize(size_t heap_size);
969 
970   bool AddHeapTask(gc::HeapTask* task);
971 
972  private:
973   class ConcurrentGCTask;
974   class CollectorTransitionTask;
975   class HeapTrimTask;
976   class TriggerPostForkCCGcTask;
977   class ReduceTargetFootprintTask;
978 
979   // Compact source space to target space. Returns the collector used.
980   collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
981                                        space::ContinuousMemMapAllocSpace* source_space,
982                                        GcCause gc_cause)
983       REQUIRES(Locks::mutator_lock_);
984 
985   void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
986   void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
987       REQUIRES(!*gc_complete_lock_);
988   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
989 
990   double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
991                                            uint64_t current_process_cpu_time) const;
992 
993   // Create a mem map with a preferred base address.
994   static MemMap MapAnonymousPreferredAddress(const char* name,
995                                              uint8_t* request_begin,
996                                              size_t capacity,
997                                              std::string* out_error_str);
998 
999   bool SupportHSpaceCompaction() const {
1000     // Returns true if we can do hspace compaction
1001     return main_space_backup_ != nullptr;
1002   }
1003 
1004   // Attempt to use all the userfaultfd related ioctls.
1005   void MaybePerformUffdIoctls(GcCause cause, uint32_t requested_gc_num) const;
1006 
1007   // size_t saturating arithmetic.
1008   static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
1009     return x > y ? x - y : 0;
1010   }
1011   static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
1012     return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
1013   }
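  // Worked examples for the saturating helpers above: UnsignedDifference(3, 5) yields 0 instead
  // of wrapping to a huge size_t, and UnsignedSum(std::numeric_limits<size_t>::max(), 1) clamps
  // to std::numeric_limits<size_t>::max() instead of overflowing to 0.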
1014 
1015   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
1016     return
1017         allocator_type != kAllocatorTypeRegionTLAB &&
1018         allocator_type != kAllocatorTypeBumpPointer &&
1019         allocator_type != kAllocatorTypeTLAB &&
1020         allocator_type != kAllocatorTypeRegion;
1021   }
1022   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
1023     if (kUseReadBarrier) {
1024       // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
1025       return true;
1026     }
1027     return
1028         allocator_type != kAllocatorTypeTLAB &&
1029         allocator_type != kAllocatorTypeBumpPointer;
1030   }
1031   static bool IsMovingGc(CollectorType collector_type) {
1032     return
1033         collector_type == kCollectorTypeCC ||
1034         collector_type == kCollectorTypeSS ||
1035         collector_type == kCollectorTypeCCBackground ||
1036         collector_type == kCollectorTypeHomogeneousSpaceCompact;
1037   }
1038   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
1039       REQUIRES_SHARED(Locks::mutator_lock_);
1040 
1041   // Checks whether we should garbage collect:
1042   ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
1043   float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
1044   void CheckGCForNative(Thread* self)
1045       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
1046 
1047   accounting::ObjectStack* GetMarkStack() {
1048     return mark_stack_.get();
1049   }
1050 
1051   // We don't force this to be inlined since it is a slow path.
1052   template <bool kInstrumented, typename PreFenceVisitor>
1053   mirror::Object* AllocLargeObject(Thread* self,
1054                                    ObjPtr<mirror::Class>* klass,
1055                                    size_t byte_count,
1056                                    const PreFenceVisitor& pre_fence_visitor)
1057       REQUIRES_SHARED(Locks::mutator_lock_)
1058       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
1059                !*backtrace_lock_, !process_state_update_lock_);
1060 
1061   // Handles Allocate()'s slow allocation path with GC involved after an initial allocation
1062   // attempt failed.
1063   // Called with thread suspension disallowed, but re-enables it, and may suspend, internally.
1064   // Returns null if instrumentation or the allocator changed.
1065   mirror::Object* AllocateInternalWithGc(Thread* self,
1066                                          AllocatorType allocator,
1067                                          bool instrumented,
1068                                          size_t num_bytes,
1069                                          size_t* bytes_allocated,
1070                                          size_t* usable_size,
1071                                          size_t* bytes_tl_bulk_allocated,
1072                                          ObjPtr<mirror::Class>* klass)
1073       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
1074       REQUIRES(Roles::uninterruptible_)
1075       REQUIRES_SHARED(Locks::mutator_lock_);
1076 
1077   // Allocate into a specific space.
1078   mirror::Object* AllocateInto(Thread* self,
1079                                space::AllocSpace* space,
1080                                ObjPtr<mirror::Class> c,
1081                                size_t bytes)
1082       REQUIRES_SHARED(Locks::mutator_lock_);
1083 
1084   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
1085   // wrong space.
1086   void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
1087 
1088   // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
1089   // that the switch statement is constant optimized in the entrypoints.
1090   template <const bool kInstrumented, const bool kGrow>
1091   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
1092                                               AllocatorType allocator_type,
1093                                               size_t alloc_size,
1094                                               size_t* bytes_allocated,
1095                                               size_t* usable_size,
1096                                               size_t* bytes_tl_bulk_allocated)
1097       REQUIRES_SHARED(Locks::mutator_lock_);
1098 
1099   mirror::Object* AllocWithNewTLAB(Thread* self,
1100                                    AllocatorType allocator_type,
1101                                    size_t alloc_size,
1102                                    bool grow,
1103                                    size_t* bytes_allocated,
1104                                    size_t* usable_size,
1105                                    size_t* bytes_tl_bulk_allocated)
1106       REQUIRES_SHARED(Locks::mutator_lock_);
1107 
1108   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
1109       REQUIRES_SHARED(Locks::mutator_lock_);
1110 
1111   // Are we out of memory, and thus should force a GC or fail?
1112   // For concurrent collectors, out of memory is defined by growth_limit_.
1113   // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
1114   // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
1115   // to accommodate the allocation.
1116   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
1117                                                size_t alloc_size,
1118                                                bool grow);
1119 
1120   // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
1121   void RunFinalization(JNIEnv* env, uint64_t timeout);
1122 
1123   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
1124   // waited for.
1125   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
1126       REQUIRES(gc_complete_lock_);
1127 
1128   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
1129       REQUIRES(!*pending_task_lock_);
1130 
1131   void RequestConcurrentGCAndSaveObject(Thread* self,
1132                                         bool force_full,
1133                                         uint32_t observed_gc_num,
1134                                         ObjPtr<mirror::Object>* obj)
1135       REQUIRES_SHARED(Locks::mutator_lock_)
1136       REQUIRES(!*pending_task_lock_);
1137 
1138   static constexpr uint32_t GC_NUM_ANY = std::numeric_limits<uint32_t>::max();
1139 
1140   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
1141   // which type of Gc was actually run.
1142   // We pass in the intended GC sequence number to ensure that multiple approximately concurrent
1143   // requests result in a single GC; clearly redundant requests will be pruned.  A requested_gc_num
1144   // of GC_NUM_ANY indicates that we should not prune redundant requests.  (In the unlikely case
1145   // that gcs_completed_ gets this big, we just accept a potential extra GC or two.)
1146   collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
1147                                            GcCause gc_cause,
1148                                            bool clear_soft_references,
1149                                            uint32_t requested_gc_num)
1150       REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
1151                !*pending_task_lock_, !process_state_update_lock_);
1152 
1153   void PreGcVerification(collector::GarbageCollector* gc)
1154       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
1155   void PreGcVerificationPaused(collector::GarbageCollector* gc)
1156       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
1157   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
1158       REQUIRES(Locks::mutator_lock_);
1159   void PreSweepingGcVerification(collector::GarbageCollector* gc)
1160       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1161   void PostGcVerification(collector::GarbageCollector* gc)
1162       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
1163   void PostGcVerificationPaused(collector::GarbageCollector* gc)
1164       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
1165 
1166   // Find a collector based on GC type.
1167   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
1168 
1169   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
1170   void CreateMainMallocSpace(MemMap&& mem_map,
1171                              size_t initial_size,
1172                              size_t growth_limit,
1173                              size_t capacity);
1174 
1175   // Create a malloc space based on a mem map. Does not set the space as default.
1176   space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
1177                                                   size_t initial_size,
1178                                                   size_t growth_limit,
1179                                                   size_t capacity,
1180                                                   const char* name,
1181                                                   bool can_move_objects);
1182 
1183   // Given the current contents of the alloc space, increase the allowed heap footprint to match
1184   // the target utilization ratio.  This should only be called immediately after a full garbage
1185   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
1186   // the GC was run.
1187   // This is only called by the thread that set collector_type_running_ to a value other than
1188   // kCollectorTypeNone, or while holding gc_complete_lock, and ensuring that
1189   // collector_type_running_ is kCollectorTypeNone.
1190   void GrowForUtilization(collector::GarbageCollector* collector_ran,
1191                           size_t bytes_allocated_before_gc = 0)
1192       REQUIRES(!process_state_update_lock_);
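  // A simplified sketch of the growth computation this performs, inferred from the min_free_,
  // max_free_ and target_utilization_ field comments further below; the real code also handles
  // the foreground multiplier and the concurrent-GC start threshold.
  //
  //   size_t bytes_allocated = GetBytesAllocated();
  //   // Aim for bytes_allocated / footprint == target_utilization_ ...
  //   uint64_t target_size = bytes_allocated / target_utilization_;
  //   // ... while always leaving between min_free_ and max_free_ bytes of headroom.
  //   target_size = std::clamp<uint64_t>(target_size,
  //                                      bytes_allocated + min_free_,
  //                                      bytes_allocated + max_free_);
  //   target_footprint_.store(target_size, std::memory_order_relaxed);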
1193 
1194   size_t GetPercentFree();
1195 
1196   // Swap the allocation stack with the live stack.
1197   void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
1198 
1199   // Clear cards and update the mod union table. The alloc space is only processed when
1200   // process_alloc_space_cards is true; in that case its cards are cleared if
1201   // clear_alloc_space_cards is true and aged otherwise.
1202   void ProcessCards(TimingLogger* timings,
1203                     bool use_rem_sets,
1204                     bool process_alloc_space_cards,
1205                     bool clear_alloc_space_cards)
1206       REQUIRES_SHARED(Locks::mutator_lock_);
1207 
1208   // Push an object onto the allocation stack.
1209   void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
1210       REQUIRES_SHARED(Locks::mutator_lock_)
1211       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1212   void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
1213       REQUIRES_SHARED(Locks::mutator_lock_)
1214       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1215   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
1216       REQUIRES_SHARED(Locks::mutator_lock_)
1217       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1218 
1219   void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
1220   void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
1221 
1222   // Whether the configured collector runs concurrently with the mutators. Currently true for
1223   // the concurrent copying (CC) and concurrent mark sweep (CMS) collectors, false for other GC types.
1224   bool IsGcConcurrent() const ALWAYS_INLINE {
1225     return collector_type_ == kCollectorTypeCC ||
1226         collector_type_ == kCollectorTypeCMS ||
1227         collector_type_ == kCollectorTypeCCBackground;
1228   }
1229 
1230   // Trim the managed and native spaces by releasing unused memory back to the OS.
1231   void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
1232 
1233   // Trim unused pages at the end of reference tables.
1234   void TrimIndirectReferenceTables(Thread* self);
1235 
1236   template <typename Visitor>
1237   ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
1238       REQUIRES_SHARED(Locks::mutator_lock_)
1239       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1240   template <typename Visitor>
1241   ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
1242       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1243 
1244   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
1245 
1246   // GC stress mode attempts to do one GC per unique backtrace.
1247   void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
1248       REQUIRES_SHARED(Locks::mutator_lock_)
1249       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
1250                !*backtrace_lock_, !process_state_update_lock_);
1251 
1252   collector::GcType NonStickyGcType() const {
1253     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
1254   }
1255 
1256   // Return the amount of space we allow for native memory when deciding whether to
1257   // collect. We collect when a weighted sum of Java memory plus native memory exceeds
1258   // the similarly weighted sum of the Java heap size target and this value.
1259   ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
1260     // We keep the traditional limit of max_free_ in place for small heaps,
1261     // but allow it to be adjusted upward for large heaps to limit GC overhead.
1262     return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
1263   }
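  // An illustrative version of the check described above; the real code applies additional
  // weighting, and the exact request call and local names here are assumptions:
  //
  //   size_t java_used   = num_bytes_allocated_.load(std::memory_order_relaxed);
  //   size_t java_target = target_footprint_.load(std::memory_order_relaxed);
  //   size_t native_used = GetNativeBytes();
  //   if (java_used + native_used > java_target + NativeAllocationGcWatermark()) {
  //     // Native plus Java usage has outgrown the combined target: ask for a concurrent GC.
  //     RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/ false, GetCurrentGcNum());
  //   }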
1264 
1265   ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
1266 
1267   // On switching app from background to foreground, grow the heap size
1268   // to incorporate foreground heap growth multiplier.
1269   void GrowHeapOnJankPerceptibleSwitch() REQUIRES(!process_state_update_lock_);
1270 
1271   // Update *_freed_ever_ counters to reflect current GC values.
1272   void IncrementFreedEver();
1273 
1274   // Keeps a VLOG call out of heap-inl.h, which is transitively included in half the world.
1275   static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
1276 
1277   // Return our best approximation of the number of bytes of native memory that
1278   // are currently in use, and could possibly be reclaimed as an indirect result
1279   // of a garbage collection.
1280   size_t GetNativeBytes();
1281 
1282   // Set concurrent_start_bytes_ to a reasonable guess, given target_footprint_.
1283   void SetDefaultConcurrentStartBytes() REQUIRES(!*gc_complete_lock_);
1284   // This version assumes no concurrent updaters.
1285   void SetDefaultConcurrentStartBytesLocked();
1286 
1287   // All-known continuous spaces, where objects lie within fixed bounds.
1288   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1289 
1290   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
1291   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1292 
1293   // All-known alloc spaces, where objects may be or have been allocated.
1294   std::vector<space::AllocSpace*> alloc_spaces_;
1295 
1296   // A space where non-movable objects are allocated. When compaction is enabled it contains
1297   // Classes, ArtMethods, ArtFields, and other non-moving objects.
1298   space::MallocSpace* non_moving_space_;
1299 
1300   // Space which we use for the kAllocatorTypeROSAlloc.
1301   space::RosAllocSpace* rosalloc_space_;
1302 
1303   // Space which we use for the kAllocatorTypeDlMalloc.
1304   space::DlMallocSpace* dlmalloc_space_;
1305 
1306   // The main space is the space which the GC copies to and from on process state updates. This
1307   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
1308   space::MallocSpace* main_space_;
1309 
1310   // The large object space we are currently allocating into.
1311   space::LargeObjectSpace* large_object_space_;
1312 
1313   // The card table, dirtied by the write barrier.
1314   std::unique_ptr<accounting::CardTable> card_table_;
1315 
1316   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
1317 
1318   // A mod-union table remembers all of the references from its space to other spaces.
1319   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
1320       mod_union_tables_;
1321 
1322   // A remembered set remembers all of the references from its space to the target space.
1323   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
1324       remembered_sets_;
1325 
1326   // The current collector type.
1327   CollectorType collector_type_;
1328   // Which collector we use when the app is in the foreground.
1329   CollectorType foreground_collector_type_;
1330   // Which collector we will use when the app is notified of a transition to background.
1331   CollectorType background_collector_type_;
1332   // Desired collector type; the heap trimming daemon transitions the heap if it is != collector_type_.
1333   CollectorType desired_collector_type_;
1334 
1335   // Lock which guards pending tasks.
1336   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1337 
1338   // How many GC threads we may use for paused parts of garbage collection.
1339   const size_t parallel_gc_threads_;
1340 
1341   // How many GC threads we may use for unpaused parts of garbage collection.
1342   const size_t conc_gc_threads_;
1343 
1344   // Whether we are in low memory mode.
1345   const bool low_memory_mode_;
1346 
1347   // If we get a pause longer than the long pause log threshold, then we print out the GC
1348   // after it finishes.
1349   const size_t long_pause_log_threshold_;
1350 
1351   // If we get a GC longer than the long GC log threshold, then we print out the GC after it finishes.
1352   const size_t long_gc_log_threshold_;
1353 
1354   // Starting time of the new process; meant to be used for measuring total process CPU time.
1355   uint64_t process_cpu_start_time_ns_;
1356 
1357   // Last time (before and after) GC started; meant to be used to measure the
1358   // duration between two GCs.
1359   uint64_t pre_gc_last_process_cpu_time_ns_;
1360   uint64_t post_gc_last_process_cpu_time_ns_;
1361 
1362   // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
1363   double pre_gc_weighted_allocated_bytes_;
1364   double post_gc_weighted_allocated_bytes_;
1365 
1366   // If we ignore the target footprint, the heap can grow until it hits the heap capacity; this
1367   // is useful for benchmarking since it reduces time spent in GC to a low percentage.
1368   const bool ignore_target_footprint_;
1369 
1370   // When running tests or in certain other configurations we may not want logs for explicit
1371   // GCs, since they can get spammy.
1372   const bool always_log_explicit_gcs_;
1373 
1374   // Lock which guards zygote space creation.
1375   Mutex zygote_creation_lock_;
1376 
1377   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
1378   // zygote space creation.
1379   space::ZygoteSpace* zygote_space_;
1380 
1381   // Minimum allocation size of large object.
1382   size_t large_object_threshold_;
1383 
1384   // Guards access to the state of GC; the associated condition variable is used to signal when
1385   // a GC completes.
1386   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1387   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
1388 
1389   // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
1390   Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1391   std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
1392   // This counter keeps track of how many threads are currently in a JNI critical section. This is
1393   // incremented once per thread even with nested enters.
1394   size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
1395   bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
1396 
1397   // Reference processor.
1398   std::unique_ptr<ReferenceProcessor> reference_processor_;
1399 
1400   // Task processor; proxies heap trim requests to the daemon threads.
1401   std::unique_ptr<TaskProcessor> task_processor_;
1402 
1403   // The following are declared volatile only for debugging purposes; it shouldn't otherwise
1404   // matter.
1405 
1406   // Collector type of the running GC.
1407   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
1408 
1409   // Cause of the last running GC.
1410   volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
1411 
1412   // The thread currently running the GC.
1413   volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
1414 
1415   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
1416   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
1417   collector::GcType next_gc_type_;
1418 
1419   // Maximum size that the heap can reach.
1420   size_t capacity_;
1421 
1422   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
1423   // programs it is "cleared", making it the same as capacity.
1424   // Only weakly enforced for simultaneous allocations.
1425   size_t growth_limit_;
1426 
1427   // Requested initial heap size. Temporarily ignored after a fork, but re-established after a
1428   // while, which usually triggers the initial GC.
1429   size_t initial_heap_size_;
1430 
1431   // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
1432   // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
1433   // concurrent GC case. Updates normally occur while collector_type_running_ is not none.
1434   Atomic<size_t> target_footprint_;
1435 
1436   Mutex process_state_update_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1437 
1438   // Computed with the foreground heap growth multiplier in GrowForUtilization() when running in
1439   // the jank non-perceptible (background) state. On a process-state update from background to
1440   // foreground we set target_footprint_ to this value.
1441   size_t min_foreground_target_footprint_ GUARDED_BY(process_state_update_lock_);
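  // A sketch of how GrowHeapOnJankPerceptibleSwitch() (declared above) is assumed to consume this
  // field on a background-to-foreground transition:
  //
  //   MutexLock mu(self, process_state_update_lock_);
  //   if (target_footprint_.load(std::memory_order_relaxed) < min_foreground_target_footprint_) {
  //     // Restore the foreground-sized footprint so the newly visible app does not GC immediately.
  //     target_footprint_.store(min_foreground_target_footprint_, std::memory_order_relaxed);
  //   }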
1442 
1443   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1444   // it completes ahead of an allocation failing.
1445   // A multiple of this is also used to determine when to trigger a GC in response to native
1446   // allocation.
1447   // After initialization, this is only updated by the thread that set collector_type_running_ to
1448   // a value other than kCollectorTypeNone, or while holding gc_complete_lock, and ensuring that
1449   // collector_type_running_ is kCollectorTypeNone.
1450   size_t concurrent_start_bytes_;
1451 
1452   // Since the heap was created, how many bytes have been freed.
1453   std::atomic<uint64_t> total_bytes_freed_ever_;
1454 
1455   // Since the heap was created, how many objects have been freed.
1456   std::atomic<uint64_t> total_objects_freed_ever_;
1457 
1458   // Number of bytes currently allocated and not yet reclaimed. Includes active
1459   // TLABs in their entirety, even if they have not yet been parceled out.
1460   Atomic<size_t> num_bytes_allocated_;
1461 
1462   // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
1463   // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
1464   // not include bytes allocated through the system malloc, since those are implicitly included.
1465   Atomic<size_t> native_bytes_registered_;
1466 
1467   // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
1468   Atomic<size_t> old_native_bytes_allocated_;
1469 
1470   // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
1471   // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
1472   Atomic<uint32_t> native_objects_notified_;
1473 
1474   // Number of bytes freed by thread local buffer revokes. This will
1475   // cancel out the ahead-of-time bulk counting of bytes allocated in
1476   // rosalloc thread-local buffers.  It is temporarily accumulated
1477   // here to be subtracted from num_bytes_allocated_ later at the next
1478   // GC.
1479   Atomic<size_t> num_bytes_freed_revoke_;
1480 
1481   // Records the number of bytes allocated at the time of GC, which is used later to calculate
1482   // how many bytes have been allocated since the last GC.
1483   size_t num_bytes_alive_after_gc_;
1484 
1485   // Info related to the current or previous GC iteration.
1486   collector::Iteration current_gc_iteration_;
1487 
1488   // Heap verification flags.
1489   const bool verify_missing_card_marks_;
1490   const bool verify_system_weaks_;
1491   const bool verify_pre_gc_heap_;
1492   const bool verify_pre_sweeping_heap_;
1493   const bool verify_post_gc_heap_;
1494   const bool verify_mod_union_table_;
1495   bool verify_pre_gc_rosalloc_;
1496   bool verify_pre_sweeping_rosalloc_;
1497   bool verify_post_gc_rosalloc_;
1498   const bool gc_stress_mode_;
1499 
1500   // RAII that temporarily disables the rosalloc verification during
1501   // the zygote fork.
1502   class ScopedDisableRosAllocVerification {
1503    private:
1504     Heap* const heap_;
1505     const bool orig_verify_pre_gc_;
1506     const bool orig_verify_pre_sweeping_;
1507     const bool orig_verify_post_gc_;
1508 
1509    public:
1510     explicit ScopedDisableRosAllocVerification(Heap* heap)
1511         : heap_(heap),
1512           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
1513           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
1514           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
1515       heap_->verify_pre_gc_rosalloc_ = false;
1516       heap_->verify_pre_sweeping_rosalloc_ = false;
1517       heap_->verify_post_gc_rosalloc_ = false;
1518     }
1519     ~ScopedDisableRosAllocVerification() {
1520       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
1521       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
1522       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
1523     }
1524   };
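  // Illustrative use of the RAII helper above, per its class comment; the surrounding code is a
  // sketch, not the actual zygote-fork path:
  //
  //   {
  //     // Verification flags are saved and forced off for the duration of this scope.
  //     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
  //     // ... carve out the zygote space ...
  //   }  // destructor restores the three verify_*_rosalloc_ flags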
1525 
1526   // Parallel GC data structures.
1527   std::unique_ptr<ThreadPool> thread_pool_;
1528 
1529   // A bitmap with bits set for the objects known to be live since the last GC cycle.
1530   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1531   // A bitmap with bits set for the objects marked in the current GC cycle.
1532   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1533 
1534   // Mark stack that we reuse to avoid re-allocating the mark stack.
1535   std::unique_ptr<accounting::ObjectStack> mark_stack_;
1536 
1537   // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
1538   // to use the live bitmap as the old mark bitmap.
1539   const size_t max_allocation_stack_size_;
1540   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
1541 
1542   // Second allocation stack so that we can process allocations with the heap unlocked.
1543   std::unique_ptr<accounting::ObjectStack> live_stack_;
1544 
1545   // Allocator type.
1546   AllocatorType current_allocator_;
1547   const AllocatorType current_non_moving_allocator_;
1548 
1549   // Which GCs we run in order when an allocation fails.
1550   std::vector<collector::GcType> gc_plan_;
1551 
1552   // Bump pointer spaces.
1553   space::BumpPointerSpace* bump_pointer_space_;
1554   // Temp space is the space which the semispace collector copies to.
1555   space::BumpPointerSpace* temp_space_;
1556 
1557   // Region space, used by the concurrent collector.
1558   space::RegionSpace* region_space_;
1559 
1560   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
1561   // utilization, regardless of target utilization ratio.
1562   const size_t min_free_;
1563 
1564   // The ideal maximum free size, when we grow the heap for utilization.
1565   const size_t max_free_;
1566 
1567   // Target ideal heap utilization ratio.
1568   double target_utilization_;
1569 
1570   // How much more we grow the heap when we are a foreground app instead of background.
1571   double foreground_heap_growth_multiplier_;
1572 
1573   // The amount of native memory allocation since the last GC required to cause us to wait for a
1574   // collection as a result of native allocation. Very large values can cause the device to run
1575   // out of memory, due to lack of finalization to reclaim native memory.  Making it too small can
1576   // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
1577   // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
1578   const size_t stop_for_native_allocs_;
1579 
1580   // Total time for which mutators are paused or waiting for GC to complete.
1581   uint64_t total_wait_time_;
1582 
1583   // The current state of heap verification; may be enabled or disabled.
1584   VerifyObjectMode verify_object_mode_;
1585 
1586   // Compacting GC disable count; prevents compacting GC from running iff > 0.
1587   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1588 
1589   std::vector<collector::GarbageCollector*> garbage_collectors_;
1590   collector::SemiSpace* semi_space_collector_;
1591   Atomic<collector::ConcurrentCopying*> active_concurrent_copying_collector_;
1592   collector::ConcurrentCopying* young_concurrent_copying_collector_;
1593   collector::ConcurrentCopying* concurrent_copying_collector_;
1594 
1595   const bool is_running_on_memory_tool_;
1596   const bool use_tlab_;
1597 
1598   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1599   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1600   std::unique_ptr<space::MallocSpace> main_space_backup_;
1601 
1602   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1603   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1604 
1605   // Time of the last homogeneous space compaction caused by OOM.
1606   uint64_t last_time_homogeneous_space_compaction_by_oom_;
1607 
1608   // Number of OOMs avoided by homogeneous space compaction.
1609   Atomic<size_t> count_delayed_oom_;
1610 
1611   // Count for requested homogeneous space compaction.
1612   Atomic<size_t> count_requested_homogeneous_space_compaction_;
1613 
1614   // Count for ignored homogeneous space compaction.
1615   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1616 
1617   // Count for performed homogeneous space compaction.
1618   Atomic<size_t> count_performed_homogeneous_space_compaction_;
1619 
1620   // The number of garbage collections (either young or full, not trims or the like) we have
1621   // completed since heap creation. We include requests that turned out to be impossible
1622   // because they were disabled. We guard against wrapping, though that's unlikely.
1623   // Increment is guarded by gc_complete_lock_.
1624   Atomic<uint32_t> gcs_completed_;
1625 
1626   // The number of the last garbage collection that has been requested.  A value of gcs_completed_
1627   // + 1 indicates that another collection is needed or in progress. A value of gcs_completed_ or
1628   // (logically) less means that no new GC has been requested.
1629   Atomic<uint32_t> max_gc_requested_;
1630 
1631   // Active tasks which we can modify (change target time, desired collector type, etc.).
1632   CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
1633   HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
1634 
1635   // Whether or not we use homogeneous space compaction to avoid OOM errors.
1636   bool use_homogeneous_space_compaction_for_oom_;
1637 
1638   // If true, enable generational collection when using the Concurrent Copying
1639   // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
1640   // for major collections. Set in Heap constructor.
1641   const bool use_generational_cc_;
1642 
1643   // True if the currently running collection has made some thread wait.
1644   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
1645   // The number of blocking GC runs.
1646   uint64_t blocking_gc_count_;
1647   // The total duration of blocking GC runs.
1648   uint64_t blocking_gc_time_;
1649   // The duration of the window for the GC count rate histograms.
1650   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
1651   // Maximum number of missed histogram windows for which statistics will be collected.
1652   static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
1653   // The last time when the GC count rate histograms were updated.
1654   // This is rounded down to a multiple of kGcCountRateHistogramWindowDuration (10s).
1655   uint64_t last_update_time_gc_count_rate_histograms_;
1656   // The running count of GC runs in the last window.
1657   uint64_t gc_count_last_window_;
1658   // The running count of blocking GC runs in the last window.
1659   uint64_t blocking_gc_count_last_window_;
1660   // The maximum number of buckets in the GC count rate histograms.
1661   static constexpr size_t kGcCountRateMaxBucketCount = 200;
1662   // The histogram of the number of GC invocations per window duration.
1663   Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1664   // The histogram of the number of blocking GC invocations per window duration.
1665   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
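  // A sketch, assumed from the window-related fields above, of what UpdateGcCountRateHistograms()
  // does at the end of a GC while holding gc_complete_lock_:
  //
  //   uint64_t now = NanoTime();
  //   uint64_t window_start = now - (now % kGcCountRateHistogramWindowDuration);  // 10s boundary
  //   if (window_start > last_update_time_gc_count_rate_histograms_) {
  //     // Close out the elapsed window(s), recording zero for fully missed windows (bounded by
  //     // kGcCountRateHistogramMaxNumMissedWindows).
  //     gc_count_rate_histogram_.AddValue(gc_count_last_window_);
  //     blocking_gc_count_rate_histogram_.AddValue(blocking_gc_count_last_window_);
  //     gc_count_last_window_ = 0;
  //     blocking_gc_count_last_window_ = 0;
  //     last_update_time_gc_count_rate_histograms_ = window_start;
  //   }
  //   ++gc_count_last_window_;  // count the GC that just finished in the current window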
1666 
1667   // Allocation tracking support
1668   Atomic<bool> alloc_tracking_enabled_;
1669   std::unique_ptr<AllocRecordObjectMap> allocation_records_;
1670   size_t alloc_record_depth_;
1671 
1672   // Perfetto Java Heap Profiler support.
1673   HeapSampler heap_sampler_;
1674 
1675   // GC stress related data structures.
1676   Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1677   // Debugging variables: seen backtraces vs. unique backtraces.
1678   Atomic<uint64_t> seen_backtrace_count_;
1679   Atomic<uint64_t> unique_backtrace_count_;
1680   // Stack trace hashes that we have already seen.
1681   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
1682 
1683   // Userfaultfd file descriptor.
1684   // TODO (lokeshgidra): remove this when the userfaultfd-based GC is in use.
1685   int uffd_;
1686   // We disable GC when we are shutting down the runtime in case there are daemon threads still
1687   // allocating.
1688   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
1689 
1690   // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
1691   // emit region info before and after each GC cycle.
1692   bool dump_region_info_before_gc_;
1693   bool dump_region_info_after_gc_;
1694 
1695   // Boot image spaces.
1696   std::vector<space::ImageSpace*> boot_image_spaces_;
1697 
1698   // Boot image address range. Includes images and oat files.
1699   uint32_t boot_images_start_address_;
1700   uint32_t boot_images_size_;
1701 
1702   // The number of times we initiated a GC of last resort to try to avoid an OOME.
1703   Atomic<uint64_t> pre_oome_gc_count_;
1704 
1705   // An installed allocation listener.
1706   Atomic<AllocationListener*> alloc_listener_;
1707   // An installed GC Pause listener.
1708   Atomic<GcPauseListener*> gc_pause_listener_;
1709 
1710   std::unique_ptr<Verification> verification_;
1711 
1712   friend class CollectorTransitionTask;
1713   friend class collector::GarbageCollector;
1714   friend class collector::ConcurrentCopying;
1715   friend class collector::MarkSweep;
1716   friend class collector::SemiSpace;
1717   friend class GCCriticalSection;
1718   friend class ReferenceQueue;
1719   friend class ScopedGCCriticalSection;
1720   friend class ScopedInterruptibleGCCriticalSection;
1721   friend class VerifyReferenceCardVisitor;
1722   friend class VerifyReferenceVisitor;
1723   friend class VerifyObjectVisitor;
1724 
1725   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1726 };
1727 
1728 }  // namespace gc
1729 }  // namespace art
1730 
1731 #endif  // ART_RUNTIME_GC_HEAP_H_
1732