1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_HEAP_H_
18 #define ART_RUNTIME_GC_HEAP_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <vector>
23 
24 #include "allocator_type.h"
25 #include "atomic.h"
26 #include "base/timing_logger.h"
27 #include "gc/accounting/atomic_stack.h"
28 #include "gc/accounting/card_table.h"
29 #include "gc/gc_cause.h"
30 #include "gc/collector/garbage_collector.h"
31 #include "gc/collector/gc_type.h"
32 #include "gc/collector_type.h"
33 #include "globals.h"
34 #include "gtest/gtest.h"
35 #include "instruction_set.h"
36 #include "jni.h"
37 #include "object_callbacks.h"
38 #include "offsets.h"
39 #include "reference_processor.h"
40 #include "safe_map.h"
41 #include "thread_pool.h"
42 #include "verify_object.h"
43 
44 namespace art {
45 
46 class ConditionVariable;
47 class Mutex;
48 class StackVisitor;
49 class Thread;
50 class TimingLogger;
51 
52 namespace mirror {
53   class Class;
54   class Object;
55 }  // namespace mirror
56 
57 namespace gc {
58 
59 class ReferenceProcessor;
60 
61 namespace accounting {
62   class HeapBitmap;
63   class ModUnionTable;
64   class RememberedSet;
65 }  // namespace accounting
66 
67 namespace collector {
68   class ConcurrentCopying;
69   class GarbageCollector;
70   class MarkCompact;
71   class MarkSweep;
72   class SemiSpace;
73 }  // namespace collector
74 
75 namespace allocator {
76   class RosAlloc;
77 }  // namespace allocator
78 
79 namespace space {
80   class AllocSpace;
81   class BumpPointerSpace;
82   class DiscontinuousSpace;
83   class DlMallocSpace;
84   class ImageSpace;
85   class LargeObjectSpace;
86   class MallocSpace;
87   class RosAllocSpace;
88   class Space;
89   class SpaceTest;
90   class ContinuousMemMapAllocSpace;
91 }  // namespace space
92 
93 class AgeCardVisitor {
94  public:
95   byte operator()(byte card) const {
96     if (card == accounting::CardTable::kCardDirty) {
97       return card - 1;
98     } else {
99       return 0;
100     }
101   }
102 };
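
// Editorial note (illustrative, not part of the original header): AgeCardVisitor implements a card
// "aging" policy: a freshly dirtied card (kCardDirty) is demoted by one so the next sticky/partial
// collection still scans it, and any other value is cleared to 0. A sketch of how such a visitor
// might be applied over a space's cards, assuming a ModifyCardsAtomic-style helper (name and
// signature are assumptions, not taken from this header):
//
//   card_table->ModifyCardsAtomic(space->Begin(), space->End(),
//                                 AgeCardVisitor(),  // computes the new value of each card
//                                 VoidFunctor());    // no extra action for modified cards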
103 
104 enum HomogeneousSpaceCompactResult {
105   // Success.
106   kSuccess,
107   // Reject due to disabled moving GC.
108   kErrorReject,
109   // System is shutting down.
110   kErrorVMShuttingDown,
111 };
112 
113 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
114 static constexpr bool kUseRosAlloc = true;
115 
116 // If true, use thread-local allocation stack.
117 static constexpr bool kUseThreadLocalAllocationStack = true;
118 
119 // The process state passed in from the activity manager, used to determine when to do trimming
120 // and compaction.
121 enum ProcessState {
122   kProcessStateJankPerceptible = 0,
123   kProcessStateJankImperceptible = 1,
124 };
125 std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
126 
127 class Heap {
128  public:
129   // If true, measure the total allocation time.
130   static constexpr bool kMeasureAllocationTime = false;
131   // Primitive arrays larger than this size are put in the large object space.
132   static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
133   static constexpr size_t kDefaultStartingSize = kPageSize;
134   static constexpr size_t kDefaultInitialSize = 2 * MB;
135   static constexpr size_t kDefaultMaximumSize = 256 * MB;
136   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
137   static constexpr size_t kDefaultMaxFree = 2 * MB;
138   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
139   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
140   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
141   static constexpr size_t kDefaultTLABSize = 256 * KB;
142   static constexpr double kDefaultTargetUtilization = 0.5;
143   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
144 
145   // Used so that we don't overflow the allocation time atomic integer.
146   static constexpr size_t kTimeAdjust = 1024;
147 
148   // How often we allow heap trimming to happen (nanoseconds).
149   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
150   // How long we wait after a transition request to perform a collector transition (nanoseconds).
151   static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
152 
153   // Create a heap with the requested sizes. The possibly empty
154   // image file name specifies the image Space to load based on
155   // ImageWriter output.
156   explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
157                 size_t max_free, double target_utilization,
158                 double foreground_heap_growth_multiplier, size_t capacity,
159                 size_t non_moving_space_capacity,
160                 const std::string& original_image_file_name,
161                 InstructionSet image_instruction_set,
162                 CollectorType foreground_collector_type, CollectorType background_collector_type,
163                 size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
164                 size_t long_pause_threshold, size_t long_gc_threshold,
165                 bool ignore_max_footprint, bool use_tlab,
166                 bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
167                 bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
168                 bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
169                 uint64_t min_interval_homogeneous_space_compaction_by_oom);
170 
171   ~Heap();
172 
173   // Allocates and initializes storage for an object instance.
174   template <bool kInstrumented, typename PreFenceVisitor>
175   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
176                               const PreFenceVisitor& pre_fence_visitor)
177       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
178     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
179                                                          GetCurrentAllocator(),
180                                                          pre_fence_visitor);
181   }
182 
183   template <bool kInstrumented, typename PreFenceVisitor>
184   mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
185                                         const PreFenceVisitor& pre_fence_visitor)
186       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
187     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
188                                                          GetCurrentNonMovingAllocator(),
189                                                          pre_fence_visitor);
190   }
191 
192   template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
193   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
194       Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
195       const PreFenceVisitor& pre_fence_visitor)
196       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
197 
198   AllocatorType GetCurrentAllocator() const {
199     return current_allocator_;
200   }
201 
202   AllocatorType GetCurrentNonMovingAllocator() const {
203     return current_non_moving_allocator_;
204   }
205 
206   // Visit all of the live objects in the heap.
207   void VisitObjects(ObjectCallback callback, void* arg)
208       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
209 
210   void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
211       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
212 
213   void RegisterNativeAllocation(JNIEnv* env, size_t bytes);
214   void RegisterNativeFree(JNIEnv* env, size_t bytes);
215 
216   // Change the allocator, updates entrypoints.
217   void ChangeAllocator(AllocatorType allocator)
218       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
219       LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
220 
221   // Transition the garbage collector during runtime, may copy objects from one space to another.
222   void TransitionCollector(CollectorType collector_type);
223 
224   // Change the collector to be one of the possible options (MS, CMS, SS).
225   void ChangeCollector(CollectorType collector_type)
226       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
227 
228   // The given reference is believed to be to an object in the Java heap, check the soundness of it.
229   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
230   // proper lock ordering for it.
231   void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
232 
233   // Check sanity of all live references.
234   void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
235   // Returns how many failures occurred.
236   size_t VerifyHeapReferences(bool verify_referents = true)
237       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
238   bool VerifyMissingCardMarks()
239       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
240 
241   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
242   // and doesn't abort on error, allowing the caller to report more
243   // meaningful diagnostics.
244   bool IsValidObjectAddress(const mirror::Object* obj) const
245       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
246 
247   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
248   // very slow.
249   bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
250       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
251 
252   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
253   // Requires the heap lock to be held.
254   bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
255                           bool search_live_stack = true, bool sorted = false)
256       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
257 
258   // Returns true if there is any chance that the object (obj) will move.
259   bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
260 
261   // Temporarily disables the moving (compacting) GC; it is re-enabled by the matching Decrement call.
262   void IncrementDisableMovingGC(Thread* self);
263   void DecrementDisableMovingGC(Thread* self);
264 
265   // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
266   void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
267 
268   // Initiates an explicit garbage collection.
269   void CollectGarbage(bool clear_soft_references);
270 
271   // Does a concurrent GC, should only be called by the GC daemon thread
272   // through runtime.
273   void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
274 
275   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
276   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
277   void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
278                       uint64_t* counts)
279       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
280       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
281   // Implements JDWP RT_Instances.
282   void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
283       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
284       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
285   // Implements JDWP OR_ReferringObjects.
286   void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
287       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
288       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
289 
290   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
291   // implement dalvik.system.VMRuntime.clearGrowthLimit.
292   void ClearGrowthLimit();
293 
294   // Target ideal heap utilization ratio, implements
295   // dalvik.system.VMRuntime.getTargetHeapUtilization.
296   double GetTargetHeapUtilization() const {
297     return target_utilization_;
298   }
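
  // Editorial note (illustrative, not part of the original header): the target utilization drives
  // how much the footprint may grow after a full collection. A rough sketch of the policy (the
  // authoritative version lives in GrowForUtilization(); min_free_/max_free_ are additionally
  // scaled by the foreground heap growth multiplier):
  //
  //   size_t target = bytes_allocated / target_utilization_;    // e.g. 20 MB / 0.5 = 40 MB
  //   target = std::min(target, bytes_allocated + max_free_);   // leave at most max_free_ free
  //   target = std::max(target, bytes_allocated + min_free_);   // leave at least min_free_ free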
299 
300   // Data structure memory usage tracking.
301   void RegisterGCAllocation(size_t bytes);
302   void RegisterGCDeAllocation(size_t bytes);
303 
304   // Set the heap's private space pointers to be the same as the space based on its type. Public
305   // due to usage by tests.
306   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
307       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
308   void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
309   void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
310 
311   // Set target ideal heap utilization ratio, implements
312   // dalvik.system.VMRuntime.setTargetHeapUtilization.
313   void SetTargetHeapUtilization(float target);
314 
315   // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
316   // from the system. Doesn't allow the space to exceed its growth limit.
317   void SetIdealFootprint(size_t max_allowed_footprint);
318 
319   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
320   // waited for.
321   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
322       LOCKS_EXCLUDED(gc_complete_lock_);
323 
324   // Update the heap's process state to a new value, may cause compaction to occur.
325   void UpdateProcessState(ProcessState process_state);
326 
327   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
328     return continuous_spaces_;
329   }
330 
331   const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
332     return discontinuous_spaces_;
333   }
334 
335   const collector::Iteration* GetCurrentGcIteration() const {
336     return &current_gc_iteration_;
337   }
338   collector::Iteration* GetCurrentGcIteration() {
339     return &current_gc_iteration_;
340   }
341 
342   // Enable verification of object references when the runtime is sufficiently initialized.
343   void EnableObjectValidation() {
344     verify_object_mode_ = kVerifyObjectSupport;
345     if (verify_object_mode_ > kVerifyObjectModeDisabled) {
346       VerifyHeap();
347     }
348   }
349 
350   // Disable object reference verification for image writing.
351   void DisableObjectValidation() {
352     verify_object_mode_ = kVerifyObjectModeDisabled;
353   }
354 
355   // Other checks may be performed if we know the heap should be in a sane state.
356   bool IsObjectValidationEnabled() const {
357     return verify_object_mode_ > kVerifyObjectModeDisabled;
358   }
359 
360   // Returns true if low memory mode is enabled.
361   bool IsLowMemoryMode() const {
362     return low_memory_mode_;
363   }
364 
365   // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
366   // Scales heap growth, min free, and max free.
367   double HeapGrowthMultiplier() const;
368 
369   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
370   // free-list backed space.
371   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
372 
373   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
374   // The call is not needed if NULL is stored in the field.
375   void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
376                          const mirror::Object* /*new_value*/) {
377     card_table_->MarkCard(dst);
378   }
379 
380   // Write barrier for array operations that update many field positions
381   void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
382                          size_t /*length TODO: element_count or byte_count?*/) {
383     card_table_->MarkCard(dst);
384   }
385 
386   void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
387     card_table_->MarkCard(obj);
388   }
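
  // Editorial note (illustrative, not part of the original header): a write barrier call follows a
  // reference store so that the card covering the holding object is dirtied and rescanned by later
  // collections. Sketch with hypothetical names ("heap", "holder", "offset", "new_value"):
  //
  //   // ... store new_value into the reference field of holder at offset, then:
  //   heap->WriteBarrierField(holder, offset, new_value);   // dirties the card covering holder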
389 
390   accounting::CardTable* GetCardTable() const {
391     return card_table_.get();
392   }
393 
394   void AddFinalizerReference(Thread* self, mirror::Object** object);
395 
396   // Returns the number of bytes currently allocated.
397   size_t GetBytesAllocated() const {
398     return num_bytes_allocated_.LoadSequentiallyConsistent();
399   }
400 
401   // Returns the number of objects currently allocated.
402   size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
403 
404   // Returns the total number of objects allocated since the heap was created.
405   uint64_t GetObjectsAllocatedEver() const;
406 
407   // Returns the total number of bytes allocated since the heap was created.
408   uint64_t GetBytesAllocatedEver() const;
409 
410   // Returns the total number of objects freed since the heap was created.
411   uint64_t GetObjectsFreedEver() const {
412     return total_objects_freed_ever_;
413   }
414 
415   // Returns the total number of bytes freed since the heap was created.
416   uint64_t GetBytesFreedEver() const {
417     return total_bytes_freed_ever_;
418   }
419 
420   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
421   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
422   // were specified. Android apps start with a growth limit (small heap size) which is
423   // cleared/extended for large apps.
424   size_t GetMaxMemory() const {
425     // There are some race conditions in the allocation code that can cause bytes allocated to
426     // become larger than growth_limit_ in rare cases.
427     return std::max(GetBytesAllocated(), growth_limit_);
428   }
429 
430   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
431   // consumed by an application.
432   size_t GetTotalMemory() const;
433 
434   // Returns approximately how much free memory we have until the next GC happens.
435   size_t GetFreeMemoryUntilGC() const {
436     return max_allowed_footprint_ - GetBytesAllocated();
437   }
438 
439   // Returns approximately how much free memory we have until the next OOME happens.
440   size_t GetFreeMemoryUntilOOME() const {
441     return growth_limit_ - GetBytesAllocated();
442   }
443 
444   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
445   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
446   size_t GetFreeMemory() const {
447     size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
448     size_t total_memory = GetTotalMemory();
449     // Make sure we don't get a negative number.
450     return total_memory - std::min(total_memory, byte_allocated);
451   }
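
  // Editorial note (illustrative, not part of the original header): how these accessors map onto
  // the java.lang.Runtime API, per the comments above:
  //
  //   Runtime.maxMemory()   -> GetMaxMemory()    // growth limit (the -Xmx style cap)
  //   Runtime.totalMemory() -> GetTotalMemory()  // memory currently consumed by the app
  //   Runtime.freeMemory()  -> GetFreeMemory()   // totalMemory minus bytes currently allocated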
452 
453   // Get the space that corresponds to an object's address. The current implementation searches all
454   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
455   // TODO: consider using faster data structure like binary tree.
456   space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
457   space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
458                                                               bool fail_ok) const;
459   space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
460 
461   void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
462 
463   // Do a pending heap transition or trim.
464   void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
465 
466   // Trim the managed and native heaps by releasing unused memory back to the OS.
467   void Trim() LOCKS_EXCLUDED(heap_trim_request_lock_);
468 
469   void RevokeThreadLocalBuffers(Thread* thread);
470   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
471   void RevokeAllThreadLocalBuffers();
472   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
473   void RosAllocVerification(TimingLogger* timings, const char* name)
474       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
475 
476   accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
477     return live_bitmap_.get();
478   }
479 
480   accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
481     return mark_bitmap_.get();
482   }
483 
484   accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
485     return live_stack_.get();
486   }
487 
488   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
489 
490   // Mark and empty stack.
491   void FlushAllocStack()
492       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
493 
494   // Revoke all the thread-local allocation stacks.
495   void RevokeAllThreadLocalAllocationStacks(Thread* self)
496       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
497       LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
498 
499   // Mark all the objects in the allocation stack in the specified bitmap.
500   // TODO: Refactor?
501   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
502                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
503                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
504                       accounting::ObjectStack* stack)
505       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
506 
507   // Mark the specified allocation stack as live.
508   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
509       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
510 
511   // Unbind any bound bitmaps.
512   void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
513 
514   // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
515   // Assumes there is only one image space.
516   space::ImageSpace* GetImageSpace() const;
517 
518   // Permanently disable moving garbage collection.
519   void DisableMovingGc();
520 
521   space::DlMallocSpace* GetDlMallocSpace() const {
522     return dlmalloc_space_;
523   }
524 
525   space::RosAllocSpace* GetRosAllocSpace() const {
526     return rosalloc_space_;
527   }
528 
529   // Return the corresponding rosalloc space.
530   space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const;
531 
532   space::MallocSpace* GetNonMovingSpace() const {
533     return non_moving_space_;
534   }
535 
536   space::LargeObjectSpace* GetLargeObjectsSpace() const {
537     return large_object_space_;
538   }
539 
540   // Returns the free list space that may contain movable objects (the
541   // one that's not the non-moving space), either rosalloc_space_ or
542   // dlmalloc_space_.
543   space::MallocSpace* GetPrimaryFreeListSpace() {
544     if (kUseRosAlloc) {
545       DCHECK(rosalloc_space_ != nullptr);
546       // reinterpret_cast is necessary as the space class hierarchy
547       // isn't known (#included) yet here.
548       return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
549     } else {
550       DCHECK(dlmalloc_space_ != nullptr);
551       return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
552     }
553   }
554 
555   std::string DumpSpaces() const WARN_UNUSED;
556   void DumpSpaces(std::ostream& stream) const;
557 
558   // Dump object should only be used by the signal handler.
559   void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
560   // Safe versions of class-descriptor and pretty-type lookup which check that objects are valid heap addresses.
561   std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
562   std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
563 
564   // GC performance measuring
565   void DumpGcPerformanceInfo(std::ostream& os);
566 
567   // Returns true if we currently care about pause times.
568   bool CareAboutPauseTimes() const {
569     return process_state_ == kProcessStateJankPerceptible;
570   }
571 
572   // Thread pool.
573   void CreateThreadPool();
574   void DeleteThreadPool();
575   ThreadPool* GetThreadPool() {
576     return thread_pool_.get();
577   }
578   size_t GetParallelGCThreadCount() const {
579     return parallel_gc_threads_;
580   }
581   size_t GetConcGCThreadCount() const {
582     return conc_gc_threads_;
583   }
584   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
585   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
586 
587   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
588   void AddRememberedSet(accounting::RememberedSet* remembered_set);
589   // Also deletes the remembered set.
590   void RemoveRememberedSet(space::Space* space);
591 
592   bool IsCompilingBoot() const;
593   bool RunningOnValgrind() const {
594     return running_on_valgrind_;
595   }
596   bool HasImageSpace() const;
597 
598   ReferenceProcessor* GetReferenceProcessor() {
599     return &reference_processor_;
600   }
601 
602  private:
603   // Compact source space to target space.
604   void Compact(space::ContinuousMemMapAllocSpace* target_space,
605                space::ContinuousMemMapAllocSpace* source_space,
606                GcCause gc_cause)
607       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
608 
609   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
610 
611   // Create a mem map with a preferred base address.
612   static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
613                                               size_t capacity, int prot_flags,
614                                               std::string* out_error_str);
615 
616   bool SupportHSpaceCompaction() const {
617     // Returns true if we can do hspace compaction
618     return main_space_backup_ != nullptr;
619   }
620 
621   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
622     return
623         allocator_type != kAllocatorTypeBumpPointer &&
624         allocator_type != kAllocatorTypeTLAB;
625   }
626   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
627     return AllocatorHasAllocationStack(allocator_type);
628   }
629   static bool IsMovingGc(CollectorType collector_type) {
630     return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
631         collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
632         collector_type == kCollectorTypeHomogeneousSpaceCompact;
633   }
634   bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
635       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
636   ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
637                                        mirror::Object** obj)
638       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
639 
640   accounting::ObjectStack* GetMarkStack() {
641     return mark_stack_.get();
642   }
643 
644   // We don't force this to be inlined since it is a slow path.
645   template <bool kInstrumented, typename PreFenceVisitor>
646   mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
647                                    const PreFenceVisitor& pre_fence_visitor)
648       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
649 
650   // Handles Allocate()'s slow allocation path with GC involved after
651   // an initial allocation attempt failed.
652   mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
653                                          size_t* bytes_allocated, size_t* usable_size,
654                                          mirror::Class** klass)
655       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
656       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
657 
658   // Allocate into a specific space.
659   mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
660                                size_t bytes)
661       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
662 
663   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
664   // wrong space.
665   void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
666 
667   // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
668   // that the switch statement is constant optimized in the entrypoints.
669   template <const bool kInstrumented, const bool kGrow>
670   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
671                                               size_t alloc_size, size_t* bytes_allocated,
672                                               size_t* usable_size)
673       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
674 
675   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
676       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
677 
678   template <bool kGrow>
679   bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
680 
681   // Returns true if the address passed in is within the address range of a continuous space.
682   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
683       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
684 
685   // Run the finalizers.
686   void RunFinalization(JNIEnv* env);
687 
688   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
689   // waited for.
690   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
691       EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
692 
693   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
694       LOCKS_EXCLUDED(heap_trim_request_lock_);
695   void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
696   void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
697       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
698   void RequestConcurrentGC(Thread* self)
699       LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
700   bool IsGCRequestPending() const;
701 
702   // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
703   // which type of GC was actually run.
704   collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
705                                            bool clear_soft_references)
706       LOCKS_EXCLUDED(gc_complete_lock_,
707                      Locks::heap_bitmap_lock_,
708                      Locks::thread_suspend_count_lock_);
709 
710   void PreGcVerification(collector::GarbageCollector* gc)
711       LOCKS_EXCLUDED(Locks::mutator_lock_);
712   void PreGcVerificationPaused(collector::GarbageCollector* gc)
713       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
714   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
715       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
716   void PreSweepingGcVerification(collector::GarbageCollector* gc)
717       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
718   void PostGcVerification(collector::GarbageCollector* gc)
719       LOCKS_EXCLUDED(Locks::mutator_lock_);
720   void PostGcVerificationPaused(collector::GarbageCollector* gc)
721       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
722 
723   // Update the watermark for the native allocated bytes based on the current number of native
724   // bytes allocated and the target utilization ratio.
725   void UpdateMaxNativeFootprint();
726 
727   // Find a collector based on GC type.
728   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
729 
730   // Create a new alloc space and compact default alloc space to it.
731   HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();
732 
733   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
734   void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
735                              size_t capacity);
736 
737   // Create a malloc space based on a mem map. Does not set the space as default.
738   space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
739                                                   size_t growth_limit, size_t capacity,
740                                                   const char* name, bool can_move_objects);
741 
742   // Given the current contents of the alloc space, increase the allowed heap footprint to match
743   // the target utilization ratio.  This should only be called immediately after a full garbage
744   // collection.
745   void GrowForUtilization(collector::GarbageCollector* collector_ran);
746 
747   size_t GetPercentFree();
748 
749   static void VerificationCallback(mirror::Object* obj, void* arg)
750       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
751 
752   // Swap the allocation stack with the live stack.
753   void SwapStacks(Thread* self);
754 
755   // Clear cards and update the mod union table.
756   void ProcessCards(TimingLogger* timings, bool use_rem_sets);
757 
758   // Signal the heap trim daemon that there is something to do, either a heap transition or heap
759   // trim.
760   void SignalHeapTrimDaemon(Thread* self);
761 
762   // Push an object onto the allocation stack.
763   void PushOnAllocationStack(Thread* self, mirror::Object** obj)
764       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
765   void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
766       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
767   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
768       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
769 
770   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
771   // sweep GC, false for other GC types.
772   bool IsGcConcurrent() const ALWAYS_INLINE {
773     return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
774   }
775 
776   // All-known continuous spaces, where objects lie within fixed bounds.
777   std::vector<space::ContinuousSpace*> continuous_spaces_;
778 
779   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
780   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
781 
782   // All-known alloc spaces, where objects may be or have been allocated.
783   std::vector<space::AllocSpace*> alloc_spaces_;
784 
785   // A space where non-movable objects are allocated; when compaction is enabled it contains
786   // Classes, ArtMethods, ArtFields, and non-moving objects.
787   space::MallocSpace* non_moving_space_;
788 
789   // Space which we use for the kAllocatorTypeROSAlloc.
790   space::RosAllocSpace* rosalloc_space_;
791 
792   // Space which we use for the kAllocatorTypeDlMalloc.
793   space::DlMallocSpace* dlmalloc_space_;
794 
795   // The main space is the space which the GC copies to and from on process state updates. This
796   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
797   space::MallocSpace* main_space_;
798 
799   // The large object space we are currently allocating into.
800   space::LargeObjectSpace* large_object_space_;
801 
802   // The card table, dirtied by the write barrier.
803   std::unique_ptr<accounting::CardTable> card_table_;
804 
805   // A mod-union table remembers all of the references from its space to other spaces.
806   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
807       mod_union_tables_;
808 
809   // A remembered set remembers all of the references from its space to the target space.
810   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
811       remembered_sets_;
812 
813   // The current collector type.
814   CollectorType collector_type_;
815   // Which collector we use when the app is in the foreground.
816   CollectorType foreground_collector_type_;
817   // Which collector we will use when the app is notified of a transition to background.
818   CollectorType background_collector_type_;
819   // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
820   CollectorType desired_collector_type_;
821 
822   // Lock which guards heap trim requests.
823   Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
824   // When we want to perform the next heap trim (nano seconds).
825   uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
826   // When we want to perform the next heap transition (nano seconds) or heap trim.
827   uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
828   // If we have a heap trim request pending.
829   bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);
830 
831   // How many GC threads we may use for paused parts of garbage collection.
832   const size_t parallel_gc_threads_;
833 
834   // How many GC threads we may use for unpaused parts of garbage collection.
835   const size_t conc_gc_threads_;
836 
837   // Boolean for if we are in low memory mode.
838   const bool low_memory_mode_;
839 
840   // If we get a pause longer than long pause log threshold, then we print out the GC after it
841   // finishes.
842   const size_t long_pause_log_threshold_;
843 
844   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
845   const size_t long_gc_log_threshold_;
846 
847   // If we ignore the max footprint, the heap grows until it hits the heap capacity; this is
848   // useful for benchmarking since it reduces the time spent in GC to a low percentage.
849   const bool ignore_max_footprint_;
850 
851   // Lock which guards zygote space creation.
852   Mutex zygote_creation_lock_;
853 
854   // If we have a zygote space.
855   bool have_zygote_space_;
856 
857   // Minimum allocation size of large object.
858   size_t large_object_threshold_;
859 
860   // Guards access to the state of GC, associated conditional variable is used to signal when a GC
861   // completes.
862   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
863   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
864 
865   // Reference processor.
866   ReferenceProcessor reference_processor_;
867 
868   // True while the garbage collector is running.
869   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
870 
871   // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
872   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
873   collector::GcType next_gc_type_;
874 
875   // Maximum size that the heap can reach.
876   const size_t capacity_;
877 
878   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
879   // programs it is "cleared" making it the same as capacity.
880   size_t growth_limit_;
881 
882   // When the number of bytes allocated exceeds the footprint, TryAllocate returns NULL, indicating
883   // that a GC should be triggered.
884   size_t max_allowed_footprint_;
885 
886   // The watermark at which a concurrent GC is requested by registerNativeAllocation.
887   size_t native_footprint_gc_watermark_;
888 
889   // Whether or not we need to run finalizers in the next native allocation.
890   bool native_need_to_run_finalization_;
891 
892   // Whether or not we currently care about pause times.
893   ProcessState process_state_;
894 
895   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
896   // it completes ahead of an allocation failing.
897   size_t concurrent_start_bytes_;
898 
899   // Since the heap was created, how many bytes have been freed.
900   uint64_t total_bytes_freed_ever_;
901 
902   // Since the heap was created, how many objects have been freed.
903   uint64_t total_objects_freed_ever_;
904 
905   // Number of bytes allocated.  Adjusted after each allocation and free.
906   Atomic<size_t> num_bytes_allocated_;
907 
908   // Bytes which are allocated and managed by native code but still need to be accounted for.
909   Atomic<size_t> native_bytes_allocated_;
910 
911   // Info related to the current or previous GC iteration.
912   collector::Iteration current_gc_iteration_;
913 
914   // Heap verification flags.
915   const bool verify_missing_card_marks_;
916   const bool verify_system_weaks_;
917   const bool verify_pre_gc_heap_;
918   const bool verify_pre_sweeping_heap_;
919   const bool verify_post_gc_heap_;
920   const bool verify_mod_union_table_;
921   bool verify_pre_gc_rosalloc_;
922   bool verify_pre_sweeping_rosalloc_;
923   bool verify_post_gc_rosalloc_;
924 
925   // RAII that temporarily disables the rosalloc verification during
926   // the zygote fork.
927   class ScopedDisableRosAllocVerification {
928    private:
929     Heap* const heap_;
930     const bool orig_verify_pre_gc_;
931     const bool orig_verify_pre_sweeping_;
932     const bool orig_verify_post_gc_;
933 
934    public:
935     explicit ScopedDisableRosAllocVerification(Heap* heap)
936         : heap_(heap),
937           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
938           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
939           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
940       heap_->verify_pre_gc_rosalloc_ = false;
941       heap_->verify_pre_sweeping_rosalloc_ = false;
942       heap_->verify_post_gc_rosalloc_ = false;
943     }
944     ~ScopedDisableRosAllocVerification() {
945       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
946       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
947       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
948     }
949   };
950 
951   // Parallel GC data structures.
952   std::unique_ptr<ThreadPool> thread_pool_;
953 
954   // The nanosecond time at which the last GC ended.
955   uint64_t last_gc_time_ns_;
956 
957   // How many bytes were allocated at the end of the last GC.
958   uint64_t last_gc_size_;
959 
960   // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
961   // and the start of the current one.
962   uint64_t allocation_rate_;
963 
964   // For a GC cycle, the bitmaps that track the live and marked objects.
965   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
966   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
967 
968   // Mark stack that we reuse to avoid re-allocating the mark stack.
969   std::unique_ptr<accounting::ObjectStack> mark_stack_;
970 
971   // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
972   // to use the live bitmap as the old mark bitmap.
973   const size_t max_allocation_stack_size_;
974   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
975 
976   // Second allocation stack so that we can process allocation with the heap unlocked.
977   std::unique_ptr<accounting::ObjectStack> live_stack_;
978 
979   // Allocator type.
980   AllocatorType current_allocator_;
981   const AllocatorType current_non_moving_allocator_;
982 
983   // Which GCs we run, in order, when an allocation fails.
984   std::vector<collector::GcType> gc_plan_;
985 
986   // Bump pointer spaces.
987   space::BumpPointerSpace* bump_pointer_space_;
988   // Temp space is the space which the semispace collector copies to.
989   space::BumpPointerSpace* temp_space_;
990 
991   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
992   // utilization, regardless of target utilization ratio.
993   size_t min_free_;
994 
995   // The ideal maximum free size, when we grow the heap for utilization.
996   size_t max_free_;
997 
998   // Target ideal heap utilization ratio
999   double target_utilization_;
1000 
1001   // How much more we grow the heap when we are a foreground app instead of background.
1002   double foreground_heap_growth_multiplier_;
1003 
1004   // Total time which mutators are paused or waiting for GC to complete.
1005   uint64_t total_wait_time_;
1006 
1007   // Total time spent allocating objects, in microseconds.
1008   AtomicInteger total_allocation_time_;
1009 
1010   // The current state of heap verification, may be enabled or disabled.
1011   VerifyObjectMode verify_object_mode_;
1012 
1013   // Compacting GC disable count, prevents compacting GC from running iff > 0.
1014   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1015 
1016   std::vector<collector::GarbageCollector*> garbage_collectors_;
1017   collector::SemiSpace* semi_space_collector_;
1018   collector::MarkCompact* mark_compact_collector_;
1019   collector::ConcurrentCopying* concurrent_copying_collector_;
1020 
1021   const bool running_on_valgrind_;
1022   const bool use_tlab_;
1023 
1024   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1025   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1026   std::unique_ptr<space::MallocSpace> main_space_backup_;
1027 
1028   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1029   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1030 
1031   // Time of the last homogeneous space compaction caused by OOM.
1032   uint64_t last_time_homogeneous_space_compaction_by_oom_;
1033 
1034   // Saved OOMs by homogeneous space compaction.
1035   Atomic<size_t> count_delayed_oom_;
1036 
1037   // Count for requested homogeneous space compaction.
1038   Atomic<size_t> count_requested_homogeneous_space_compaction_;
1039 
1040   // Count for ignored homogeneous space compaction.
1041   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1042 
1043   // Count for performed homogeneous space compaction.
1044   Atomic<size_t> count_performed_homogeneous_space_compaction_;
1045 
1046   // Whether or not we use homogeneous space compaction to avoid OOM errors.
1047   bool use_homogeneous_space_compaction_for_oom_;
1048 
1049   friend class collector::GarbageCollector;
1050   friend class collector::MarkCompact;
1051   friend class collector::MarkSweep;
1052   friend class collector::SemiSpace;
1053   friend class ReferenceQueue;
1054   friend class VerifyReferenceCardVisitor;
1055   friend class VerifyReferenceVisitor;
1056   friend class VerifyObjectVisitor;
1057   friend class ScopedHeapFill;
1058   friend class ScopedHeapLock;
1059   friend class space::SpaceTest;
1060 
1061   class AllocationTimer {
1062    private:
1063     Heap* heap_;
1064     mirror::Object** allocated_obj_ptr_;
1065     uint64_t allocation_start_time_;
1066    public:
1067     AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
1068     ~AllocationTimer();
1069   };
1070 
1071   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1072 };
1073 
1074 // ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
1075 // causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
1076 // GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
1077 class ScopedHeapFill {
1078  public:
1079   explicit ScopedHeapFill(Heap* heap)
1080       : heap_(heap),
1081         delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
1082     heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
1083   }
1084   ~ScopedHeapFill() {
1085     heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
1086   }
1087 
1088  private:
1089   Heap* const heap_;
1090   const int64_t delta_;
1091 };
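
// Editorial note (illustrative, not part of the original header): typical use of ScopedHeapFill is
// to force the next allocation in a scope onto the GC slow path. A sketch, assuming the heap is
// obtained via Runtime::Current()->GetHeap():
//
//   {
//     gc::ScopedHeapFill heap_fill(Runtime::Current()->GetHeap());
//     // The next allocation sees bytes_allocated at the growth limit, so it performs a GC and
//     // possibly throws an OutOfMemoryError, as described in the class comment above.
//   }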
1092 
1093 }  // namespace gc
1094 }  // namespace art
1095 
1096 #endif  // ART_RUNTIME_GC_HEAP_H_
1097