/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <android-base/logging.h>

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include "allocator_type.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/runtime_debug.h"
#include "base/safe_map.h"
#include "base/time_utils.h"
#include "gc/collector/gc_type.h"
#include "gc/collector/iteration.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space.h"
#include "handle.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "read_barrier_config.h"
#include "runtime_globals.h"
#include "scoped_thread_state_change.h"
#include "verify_object.h"

namespace art HIDDEN {

class ConditionVariable;
enum class InstructionSet;
class IsMarkedVisitor;
class Mutex;
class ReflectiveValueVisitor;
class RootVisitor;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

class AllocationListener;
class AllocRecordObjectMap;
class GcPauseListener;
class HeapTask;
class ReferenceProcessor;
class TaskProcessor;
class Verification;

namespace accounting {
template <typename T> class AtomicStack;
using ObjectStack = AtomicStack<mirror::Object>;
class CardTable;
class HeapBitmap;
class ModUnionTable;
class ReadBarrierTable;
class RememberedSet;
}  // namespace accounting

namespace collector {
class ConcurrentCopying;
class GarbageCollector;
class MarkSweep;
class SemiSpace;
}  // namespace collector

namespace allocator {
class RosAlloc;
}  // namespace allocator

namespace space {
class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
class MallocSpace;
class RegionSpace;
class RosAllocSpace;
class Space;
class ZygoteSpace;
}  // namespace space

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  // How much we grow the TLAB if we can do it.
  static constexpr size_t kPartialTlabSize = 16 * KB;
  static constexpr bool kUsePartialTlabs = true;

  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 32 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongPauseLogThresholdGcStress = MsToNs(50);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultLongGCLogThresholdGcStress = MsToNs(1000);
  static constexpr size_t kDefaultTLABSize = 32 * KB;
  static constexpr double kDefaultTargetUtilization = 0.6;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  // TODO: Preliminary experiments suggest this value might not be optimal.
  // This might benefit from further investigation.
  static constexpr size_t kMinLargeObjectThreshold = 12 * KB;
  static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = true;
  static uint8_t* const kPreferredAllocSpaceBegin;

  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ? space::LargeObjectSpaceType::kFreeList
                               : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
  // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
  // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec
  // on Android.
#ifdef __ANDROID__
  static constexpr uint32_t kNotifyNativeInterval = 64;
#else
  // Some host mallinfo() implementations are slow. And memory is less scarce.
  static constexpr uint32_t kNotifyNativeInterval = 384;
#endif

  // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
  // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
  // make it safe to allocate that many bytes between checks.
  static constexpr size_t kCheckImmediatelyThreshold = (10'000'000 / kNotifyNativeInterval);

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);

  // Starting size of DlMalloc/RosAlloc spaces.
  static size_t GetDefaultStartingSize() {
    return gPageSize;
  }

  // Whether the transition-GC heap threshold condition applies or not for non-low memory devices.
  // Stressing GC will bypass the heap threshold condition.
  DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);

  // Create a heap with the requested sizes. The possibly empty
  // image_file_names specify Spaces to load based on
  // ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t stop_for_native_allocs,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::vector<std::string>& boot_class_path,
       const std::vector<std::string>& boot_class_path_locations,
       ArrayRef<File> boot_class_path_files,
       ArrayRef<File> boot_class_path_image_files,
       ArrayRef<File> boot_class_path_vdex_files,
       ArrayRef<File> boot_class_path_oat_files,
       const std::vector<std::string>& image_file_names,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_target_footprint,
       bool always_log_explicit_gcs,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool measure_gc_performance,
       bool use_homogeneous_space_compaction,
       bool use_generational_cc,
       uint64_t min_interval_homogeneous_space_compaction_by_oom,
       bool dump_region_info_before_gc,
       bool dump_region_info_after_gc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented = true, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              ObjPtr<mirror::Class> klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !process_state_update_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented>(self,
                                                   klass,
                                                   num_bytes,
                                                   GetCurrentAllocator(),
                                                   pre_fence_visitor);
  }

  template <bool kInstrumented = true, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        ObjPtr<mirror::Class> klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !process_state_update_lock_,
               !Roles::uninterruptible_) {
    mirror::Object* obj = AllocObjectWithAllocator<kInstrumented>(self,
                                                                  klass,
                                                                  num_bytes,
                                                                  GetCurrentNonMovingAllocator(),
                                                                  pre_fence_visitor);
    // Java Heap Profiler check and sample allocation.
    if (GetHeapSampler().IsEnabled()) {
      JHPCheckNonTlabSampleAllocation(self, obj, num_bytes);
    }
    return obj;
  }

  template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         ObjPtr<mirror::Class> klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !process_state_update_lock_,
               !Roles::uninterruptible_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  AllocatorType GetUpdatedAllocator(AllocatorType old_allocator) {
    return (old_allocator == kAllocatorTypeNonMoving) ?
        GetCurrentNonMovingAllocator() : GetCurrentAllocator();
  }

  // Visit all of the live objects in the heap.
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform the garbage collector of non-malloc allocated native memory that might become
  // reclaimable in the future as a result of Java garbage collection.
  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

  // Notify the garbage collector of malloc allocations that might be reclaimable
  // as a result of Java garbage collection. Each such call represents approximately
  // kNotifyNativeInterval such allocations.
  void NotifyNativeAllocations(JNIEnv* env)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  uint32_t GetNotifyNativeInterval() {
    return kNotifyNativeInterval;
  }

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS). Only safe when no
  // concurrent accesses to the heap are possible.
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // The given reference is believed to be to an object in the Java heap, check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;

  // Consistency check of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Enables us to disable compacting (moving) GC until objects are released.
  EXPORT void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  EXPORT void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

  // Ensures that the obj doesn't cause userfaultfd in JNI critical calls.
  void EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects(bool release_eagerly = true)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initiates an explicit garbage collection. Guarantees that a GC started after this call has
  // completed.
  EXPORT void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  // Does a concurrent GC, provided the GC numbered requested_gc_num has not already been
  // completed. Should only be called by the GC daemon thread through runtime.
  void ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_,
               !*pending_task_lock_, !process_state_update_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
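  // Illustrative semantics (an assumption based on the parameter shapes, not a guarantee):
  // counts is expected to point at one slot per entry in classes, and counts[i] receives the
  // number of instances matched against classes[i]; with use_is_assignable_from == true,
  // instances of subclasses are counted as well.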
  void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit() REQUIRES(!*gc_complete_lock_);

  // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);

  double GetPreGcWeightedAllocatedBytes() const {
    return pre_gc_weighted_allocated_bytes_;
  }

  double GetPostGcWeightedAllocatedBytes() const {
    return post_gc_weighted_allocated_bytes_;
  }

  void CalculatePreGcWeightedAllocatedBytes();
  void CalculatePostGcWeightedAllocatedBytes();
  uint64_t GetTotalGcCpuTime();

  uint64_t GetProcessCpuStartTime() const {
    return process_cpu_start_time_ns_;
  }

  uint64_t GetPostGCLastProcessCpuTime() const {
    return post_gc_last_process_cpu_time_ns_;
  }

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  // Set while we hold gc_complete_lock or collector_type_running_ != kCollectorTypeNone.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for. Only waits for running collections, ignoring a requested but unstarted GC. Only
  // heuristic, since a new GC may have started by the time we return.
  EXPORT collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
      REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock since vector empty is thread safe.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a healthy state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Record the bytes freed by thread-local buffer revoke.
  void RecordFreeRevoke();

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  EXPORT void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);

  // Returns the number of bytes currently allocated.
  // The result should be treated as an approximation, if it is being concurrently updated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.load(std::memory_order_relaxed);
  }

  // Returns bytes_allocated before adding 'bytes' to it.
  size_t AddBytesAllocated(size_t bytes) {
    return num_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
  }

  bool GetUseGenerationalCC() const {
    return use_generational_cc_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of bytes freed since the heap was created.
  // Can decrease over time, and may even be negative, since moving an object to
  // a space in which it occupies more memory results in negative "freed bytes".
  // With default memory order, this should be viewed only as a hint.
  int64_t GetBytesFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
    return total_bytes_freed_ever_.load(mo);
  }

  space::RegionSpace* GetRegionSpace() const {
    return region_space_;
  }

  space::BumpPointerSpace* GetBumpPointerSpace() const {
    return bump_pointer_space_;
  }
  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
  // consumed by an application.
  EXPORT size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
                              GetBytesAllocated());
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return UnsignedDifference(growth_limit_, GetBytesAllocated());
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    return UnsignedDifference(GetTotalMemory(),
                              num_bytes_allocated_.load(std::memory_order_relaxed));
  }

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  EXPORT space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                               bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                              bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromAddress(const void* ptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  std::string DumpSpaceNameFromAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition()
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  // Deflate monitors, ... and trim the spaces.
  EXPORT void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  accounting::ObjectStack* GetAllocationStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return allocation_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  EXPORT void FlushAllocStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  EXPORT void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
                      accounting::ContinuousSpaceBitmap* bitmap2,
                      accounting::LargeObjectBitmap* large_objects,
                      accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  // TODO(b/260881207): refactor to only use this function in debug builds and
  // remove EXPORT.
  EXPORT bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the start address of the boot images if any; otherwise returns 0.
  uint32_t GetBootImagesStartAddress() const {
    return boot_images_start_address_;
  }

  // Get the size of all boot images, including the heap and oat areas.
  uint32_t GetBootImagesSize() const {
    return boot_images_size_;
  }

  // Check if a pointer points to a boot image.
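  // The single unsigned comparison below covers both bounds: if p lies below
  // boot_images_start_address_, the subtraction wraps around to a huge value and the < check
  // fails, so no separate lower-bound test is needed.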
  bool IsBootImageAddress(const void* p) const {
    return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
  }

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
  EXPORT std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);

  // GC performance measuring
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool. Create either the given number of threads, or as per the
  // values of conc_gc_threads_ and parallel_gc_threads_.
  void CreateThreadPool(size_t num_threads = 0);
  void WaitForWorkersToBeCreated();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }
  bool HasAppImageSpaceFor(const std::string& dex_location) const;

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  // Returns the active concurrent copying collector.
  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    collector::ConcurrentCopying* active_collector =
        active_concurrent_copying_collector_.load(std::memory_order_relaxed);
    if (use_generational_cc_) {
      DCHECK((active_collector == concurrent_copying_collector_) ||
             (active_collector == young_concurrent_copying_collector_))
          << "active_concurrent_copying_collector: " << active_collector
          << " young_concurrent_copying_collector: " << young_concurrent_copying_collector_
          << " concurrent_copying_collector: " << concurrent_copying_collector_;
    } else {
      DCHECK_EQ(active_collector, concurrent_copying_collector_);
    }
    return active_collector;
  }

  collector::MarkCompact* MarkCompactCollector() {
    DCHECK(!gUseUserfaultfd || mark_compact_ != nullptr);
    return mark_compact_;
  }

  bool IsPerformingUffdCompaction() { return gUseUserfaultfd && mark_compact_->IsCompacting(); }

  CollectorType CurrentCollectorType() const {
    DCHECK(!gUseUserfaultfd || collector_type_ == kCollectorTypeCMC);
    return collector_type_;
  }

  bool IsMovingGc() const { return IsMovingGc(CurrentCollectorType()); }

  CollectorType GetForegroundCollectorType() const { return foreground_collector_type_; }
  // EXPORT is needed to make this method visible for libartservice.
  EXPORT std::string GetForegroundCollectorName();

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Retrieve the current GC number, i.e. the number n such that we completed n GCs so far.
  // Provides acquire ordering, so that if we read this first, and then check whether a GC is
  // required, we know that the GC number read actually preceded the test.
  uint32_t GetCurrentGcNum() {
    return gcs_completed_.load(std::memory_order_acquire);
  }

  // Request asynchronous GC. observed_gc_num is the value of GetCurrentGcNum() when we started to
  // evaluate the GC triggering condition. If a GC has been completed since then, we consider our
  // job done. If we return true, then we ensured that gcs_completed_ will eventually be
  // incremented beyond observed_gc_num. We return false only in corner cases in which we cannot
  // ensure that.
  bool RequestConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t observed_gc_num)
      REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;
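  // A minimal sketch of the GetCurrentGcNum()/RequestConcurrentGC() protocol described above.
  // ShouldTriggerGc() is a hypothetical stand-in for whatever triggering condition the caller
  // evaluates; the GcCause shown is only an example:
  //
  //   uint32_t observed_gc_num = heap->GetCurrentGcNum();  // acquire-ordered read first
  //   if (ShouldTriggerGc()) {                             // then evaluate the condition
  //     // Any GC completed with a number beyond observed_gc_num satisfies this request.
  //     heap->RequestConcurrentGC(self, kGcCauseBackground, /*force_full=*/ false, observed_gc_num);
  //   }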
  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  uint64_t GetTotalTimeWaitingForGC() const {
    return total_wait_time_;
  }
  uint64_t GetPreOomeGcCount() const;

  // Perfetto Art Heap Profiler Support.
  HeapSampler& GetHeapSampler() {
    return heap_sampler_;
  }

  void InitPerfettoJavaHeapProf();
  // In NonTlab case: Check whether we should report a sample allocation and if so report it.
  // Also update state (bytes_until_sample).
  // By calling JHPCheckNonTlabSampleAllocation from different functions for Large allocations and
  // non-moving allocations we are able to use the stack to identify these allocations separately.
  EXPORT void JHPCheckNonTlabSampleAllocation(Thread* self, mirror::Object* ret, size_t alloc_size);
  // In Tlab case: Calculate the next tlab size (location of next sample point) and whether
  // a sample should be taken.
  size_t JHPCalculateNextTlabSize(Thread* self,
                                  size_t jhp_def_tlab_size,
                                  size_t alloc_size,
                                  bool* take_sample,
                                  size_t* bytes_until_sample);
  // Reduce the number of bytes to the next sample position by this adjustment.
  void AdjustSampleOffset(size_t adjustment);

  // Allocation tracking support
  // Callers to this function use double-checked locking to ensure safety on allocation_records_
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.load(std::memory_order_relaxed);
  }

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
  }

  // Return the current stack depth of allocation records.
  size_t GetAllocTrackerStackDepth() const {
    return alloc_record_depth_;
  }

  // Set the stack depth of allocation records.
  void SetAllocTrackerStackDepth(size_t alloc_record_depth) {
    alloc_record_depth_ = alloc_record_depth;
  }

  AllocRecordObjectMap* GetAllocationRecords() const REQUIRES(Locks::alloc_tracker_lock_) {
    return allocation_records_.get();
  }

  void SetAllocationRecords(AllocRecordObjectMap* records)
      REQUIRES(Locks::alloc_tracker_lock_);

  void VisitAllocationRecords(RootVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisallowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void AllowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void BroadcastForNewAllocationRecords() const
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
  bool IsGCDisabledForShutdown() const REQUIRES(!*gc_complete_lock_);

  // Create a new alloc space and compact default alloc space to it.
  EXPORT HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
      REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
  EXPORT bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;

  // Install an allocation listener.
  EXPORT void SetAllocationListener(AllocationListener* l);
  // Remove an allocation listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  EXPORT void RemoveAllocationListener();

  // Install a gc pause listener.
  EXPORT void SetGcPauseListener(GcPauseListener* l);
  // Get the currently installed gc pause listener, or null.
  GcPauseListener* GetGcPauseListener() {
    return gc_pause_listener_.load(std::memory_order_acquire);
  }
  // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  EXPORT void RemoveGcPauseListener();

  EXPORT const Verification* GetVerification() const;

  void PostForkChildAction(Thread* self) REQUIRES(!*gc_complete_lock_);

  EXPORT void TraceHeapSize(size_t heap_size);

  bool AddHeapTask(gc::HeapTask* task);

  // TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
  // page. Find a way to confirm that in userspace.
  // Address range covered by 1 Page Middle Directory (PMD) entry in the page table
  static inline ALWAYS_INLINE size_t GetPMDSize() {
    return (gPageSize / sizeof(uint64_t)) * gPageSize;
  }
  // Address range covered by 1 Page Upper Directory (PUD) entry in the page table
  static inline ALWAYS_INLINE size_t GetPUDSize() {
    return (gPageSize / sizeof(uint64_t)) * GetPMDSize();
  }

  // Returns the ideal alignment corresponding to page-table levels for the
  // given size.
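  // For example, assuming the usual 512 entries per page-table page noted above: with 4 KiB
  // pages a PMD entry covers (4096 / 8) * 4096 = 2 MiB and a PUD entry covers 512 * 2 MiB =
  // 1 GiB, so sizes below 1 GiB get 2 MiB alignment and larger sizes get 1 GiB alignment.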
  static inline size_t BestPageTableAlignment(size_t size) {
    const size_t pud_size = GetPUDSize();
    const size_t pmd_size = GetPMDSize();
    return size < pud_size ? pmd_size : pud_size;
  }

 private:
  class ConcurrentGCTask;
  class CollectorTransitionTask;
  class HeapTrimTask;
  class TriggerPostForkCCGcTask;
  class ReduceTargetFootprintTask;

  // Compact source space to target space. Returns the collector used.
  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                       space::ContinuousMemMapAllocSpace* source_space,
                                       GcCause gc_cause)
      REQUIRES(Locks::mutator_lock_);

  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
  void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_);
  void StartGCRunnable(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);

  double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
                                           uint64_t current_process_cpu_time) const;

  // Create a mem map with a preferred base address.
  static MemMap MapAnonymousPreferredAddress(const char* name,
                                             uint8_t* request_begin,
                                             size_t capacity,
                                             std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction
    return main_space_backup_ != nullptr;
  }

  // size_t saturating arithmetic
  static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
    return x > y ? x - y : 0;
  }
  static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
    return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
  }

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeRegionTLAB &&
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeRegion;
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return
        collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeSS ||
        collector_type == kCollectorTypeCMC ||
        collector_type == kCollectorTypeCCBackground ||
        collector_type == kCollectorTypeCMCBackground ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Checks whether we should garbage collect:
  ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
  float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
  void CheckGCForNative(Thread* self)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self,
                                   ObjPtr<mirror::Class>* klass,
                                   size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
               !*backtrace_lock_, !process_state_update_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after an initial allocation
  // attempt failed.
  // Called with thread suspension disallowed, but re-enables it, and may suspend, internally.
  // Returns null if instrumentation or the allocator changed.
  EXPORT mirror::Object* AllocateInternalWithGc(Thread* self,
                                                AllocatorType allocator,
                                                bool instrumented,
                                                size_t num_bytes,
                                                size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated,
                                                ObjPtr<mirror::Class>* klass)
      REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
      REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self,
                               space::AllocSpace* space,
                               ObjPtr<mirror::Class> c,
                               size_t bytes)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
                                              AllocatorType allocator_type,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT mirror::Object* AllocWithNewTLAB(Thread* self,
                                          AllocatorType allocator_type,
                                          size_t alloc_size,
                                          bool grow,
                                          size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Are we out of memory, and thus should force a GC or fail?
  // For concurrent collectors, out of memory is defined by growth_limit_.
  // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
  // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
  // to accommodate the allocation.
  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                               size_t alloc_size,
                                               bool grow);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      REQUIRES(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      REQUIRES(!*pending_task_lock_);

  EXPORT void RequestConcurrentGCAndSaveObject(Thread* self,
                                               bool force_full,
                                               uint32_t observed_gc_num,
                                               ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_);

  static constexpr uint32_t GC_NUM_ANY = std::numeric_limits<uint32_t>::max();

  // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
  // which type of Gc was actually run.
  // We pass in the intended GC sequence number to ensure that multiple approximately concurrent
  // requests result in a single GC; clearly redundant requests will be pruned. A requested_gc_num
  // of GC_NUM_ANY indicates that we should not prune redundant requests. (In the unlikely case
  // that gcs_completed_ gets this big, we just accept a potential extra GC or two.)
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
                                           GcCause gc_cause,
                                           bool clear_soft_references,
                                           uint32_t requested_gc_num)
      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
               !*pending_task_lock_, !process_state_update_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap&& mem_map,
                             size_t initial_size,
                             size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                  size_t initial_size,
                                                  size_t growth_limit,
                                                  size_t capacity,
                                                  const char* name,
                                                  bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
  // the GC was run.
  // This is only called by the thread that set collector_type_running_ to a value other than
  // kCollectorTypeNone, or while holding gc_complete_lock, and ensuring that
  // collector_type_running_ is kCollectorTypeNone.
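  // Rough worked example of "match the target utilization ratio" (ignoring min_free_/max_free_
  // and the foreground growth multiplier, which the implementation also factors in): with 48 MB
  // still allocated after a full GC and a target utilization of 0.6 (kDefaultTargetUtilization),
  // the footprint would be grown to roughly 48 / 0.6 = 80 MB.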
  void GrowForUtilization(collector::GarbageCollector* collector_ran,
                          size_t bytes_allocated_before_gc = 0)
      REQUIRES(!process_state_update_lock_);

  size_t GetPercentFree();

  // Swap the allocation stack with the live stack.
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear cards and update the mod union table. When process_alloc_space_cards is true,
  // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
  // not process the alloc space if process_alloc_space_cards is false.
  void ProcessCards(TimingLogger* timings,
                    bool use_rem_sets,
                    bool process_alloc_space_cards,
                    bool clear_alloc_space_cards)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
  EXPORT void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread,
                                                             ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);

  // What kind of concurrency behavior is the runtime after?
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCC ||
        collector_type_ == kCollectorTypeCMC ||
        collector_type_ == kCollectorTypeCMS ||
        collector_type_ == kCollectorTypeCCBackground ||
        collector_type_ == kCollectorTypeCMCBackground;
  }

  // Trim the managed and native spaces by releasing unused memory back to the OS.
  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Trim 0 pages at the end of reference tables.
  void TrimIndirectReferenceTables(Thread* self);

  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);

  // GC stress mode attempts to do one GC per unique backtrace.
  EXPORT void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_,
                                                     !*pending_task_lock_,
                                                     !*backtrace_lock_,
                                                     !process_state_update_lock_);

  collector::GcType NonStickyGcType() const {
    return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
  }

  // Return the amount of space we allow for native memory when deciding whether to
  // collect. We collect when a weighted sum of Java memory plus native memory exceeds
  // the similarly weighted sum of the Java heap size target and this value.
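  // For example, with a 512 MiB target footprint and the default max_free_ of 32 MiB, the
  // watermark computed below is 512 / 8 + 32 = 96 MiB of registered native allocation.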
  ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
    // We keep the traditional limit of max_free_ in place for small heaps,
    // but allow it to be adjusted upward for large heaps to limit GC overhead.
    return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
  }

  ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);

  // On switching app from background to foreground, grow the heap size
  // to incorporate foreground heap growth multiplier.
  void GrowHeapOnJankPerceptibleSwitch() REQUIRES(!process_state_update_lock_);

  // Update *_freed_ever_ counters to reflect current GC values.
  void IncrementFreedEver();

  // Remove a vlog code from heap-inl.h which is transitively included in half the world.
  EXPORT static void VlogHeapGrowth(size_t max_allowed_footprint,
                                    size_t new_footprint,
                                    size_t alloc_size);

  // Return our best approximation of the number of bytes of native memory that
  // are currently in use, and could possibly be reclaimed as an indirect result
  // of a garbage collection.
  size_t GetNativeBytes();

  // Set concurrent_start_bytes_ to a reasonable guess, given target_footprint_ .
  void SetDefaultConcurrentStartBytes() REQUIRES(!*gc_complete_lock_);
  // This version assumes no concurrent updaters.
  void SetDefaultConcurrentStartBytesLocked();

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated, when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  std::unique_ptr<accounting::ReadBarrierTable> rb_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
      mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
      remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  const CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type; the heap trimming daemon transitions the heap if it is != collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards pending tasks.
  Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Whether we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than the long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than the long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // Starting time of the new process; meant to be used for measuring total process CPU time.
  uint64_t process_cpu_start_time_ns_;

  // Last time (before and after) GC started; meant to be used to measure the
  // duration between two GCs.
  uint64_t pre_gc_last_process_cpu_time_ns_;
  uint64_t post_gc_last_process_cpu_time_ns_;

  // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
  double pre_gc_weighted_allocated_bytes_;
  double post_gc_weighted_allocated_bytes_;

  // If we ignore the target footprint, the heap may grow until it hits the heap capacity. This
  // is useful for benchmarking since it reduces time spent in GC to a low percentage.
  const bool ignore_target_footprint_;

  // If we are running tests or some other configurations, we might not actually
  // want logs for explicit GCs since they can get spammy.
  const bool always_log_explicit_gcs_;

  // Lock which guards zygote space creation.
  Mutex zygote_creation_lock_;

  // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
  // zygote space creation.
  space::ZygoteSpace* zygote_space_;

  // Minimum allocation size of large object.
  size_t large_object_threshold_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
  Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
  // This counter keeps track of how many threads are currently in a JNI critical section. This is
  // incremented once per thread even with nested enters.
  size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
  bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);

  // Reference processor.
  std::unique_ptr<ReferenceProcessor> reference_processor_;

  // Task processor, proxies heap trim requests to the daemon threads.
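  // The flow, roughly (the authoritative logic lives in heap.cc and task_processor.cc, and is
  // only paraphrased here): a trim is scheduled by enqueueing a HeapTrimTask on this processor
  // with a target run time; the heap-task daemon thread later pops and runs it, which ends up
  // calling TrimSpaces(). The pending task is tracked in pending_heap_trim_ below so it can be
  // updated or cancelled via ClearPendingTrim().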
  std::unique_ptr<TaskProcessor> task_processor_;

  // The following are declared volatile only for debugging purposes; it shouldn't otherwise
  // matter.

  // Collector type of the running GC.
  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);

  // Cause of the last running or attempted GC or GC-like action.
  volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);

  // The thread currently running the GC.
  volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared" making it the same as capacity.
  // Only weakly enforced for simultaneous allocations.
  size_t growth_limit_;

  // Requested initial heap size. Temporarily ignored after a fork, but then reestablished after
  // a while to usually trigger the initial GC.
  size_t initial_heap_size_;

  // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
  // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
  // concurrent GC case. Updates normally occur while collector_type_running_ is not none.
  Atomic<size_t> target_footprint_;

  Mutex process_state_update_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Computed with foreground-multiplier in GrowForUtilization() when run in
  // jank non-perceptible state. On update to process state from background to
  // foreground we set target_footprint_ and concurrent_start_bytes_ to the corresponding value.
  size_t min_foreground_target_footprint_ GUARDED_BY(process_state_update_lock_);
  size_t min_foreground_concurrent_start_bytes_ GUARDED_BY(process_state_update_lock_);

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
  // it completes ahead of an allocation failing.
  // A multiple of this is also used to determine when to trigger a GC in response to native
  // allocation.
  // After initialization, this is only updated by the thread that set collector_type_running_ to
  // a value other than kCollectorTypeNone, or while holding gc_complete_lock_, and ensuring that
  // collector_type_running_ is kCollectorTypeNone.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  std::atomic<int64_t> total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  std::atomic<uint64_t> total_objects_freed_ever_;

  // Number of bytes currently allocated and not yet reclaimed. Includes active
  // TLABs in their entirety, even if they have not yet been parceled out.
  Atomic<size_t> num_bytes_allocated_;

  // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
  // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
  // not include bytes allocated through the system malloc, since those are implicitly included.
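  //
  // Illustrative only: a hypothetical caller that wraps a native buffer in a Java object would
  // report it along these lines (exact signatures elided; see the public declarations earlier in
  // this header):
  //
  //   heap->RegisterNativeAllocation(env, buffer_size);  // bumps native_bytes_registered_
  //   ...                                                // buffer later released
  //   heap->RegisterNativeFree(env, buffer_size);        // undoes the registration
  //
  // letting the native-allocation GC trigger weigh these bytes against the Java target footprint.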
  Atomic<size_t> native_bytes_registered_;

  // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
  Atomic<size_t> old_native_bytes_allocated_;

  // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
  // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
  Atomic<uint32_t> native_objects_notified_;

  // Number of bytes freed by thread local buffer revokes. This will
  // cancel out the ahead-of-time bulk counting of bytes allocated in
  // rosalloc thread-local buffers. It is temporarily accumulated
  // here to be subtracted from num_bytes_allocated_ later at the next
  // GC.
  Atomic<size_t> num_bytes_freed_revoke_;

  // Records the number of bytes allocated at the time of GC, which is used later to calculate
  // how many bytes have been allocated since the last GC.
  size_t num_bytes_alive_after_gc_;

  // Info related to the current or previous GC iteration.
  collector::Iteration current_gc_iteration_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_pre_sweeping_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;
  bool verify_pre_gc_rosalloc_;
  bool verify_pre_sweeping_rosalloc_;
  bool verify_post_gc_rosalloc_;
  const bool gc_stress_mode_;

  // RAII that temporarily disables the rosalloc verification during
  // the zygote fork.
  class ScopedDisableRosAllocVerification {
   private:
    Heap* const heap_;
    const bool orig_verify_pre_gc_;
    const bool orig_verify_pre_sweeping_;
    const bool orig_verify_post_gc_;

   public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // A bitmap that is set corresponding to the known live objects since the last GC cycle.
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  // A bitmap that is set corresponding to the marked objects in the current GC cycle.
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
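  // In other words: during a sticky-bit ("young") collection the previous cycle's live bitmap is
  // treated as the mark bitmap for older objects, so only objects recorded on this stack, i.e.
  // those allocated since the last GC, need to be traced and are candidates for reclamation.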
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run in order when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  // Region space, used by the concurrent collector.
  space::RegionSpace* region_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  const size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  const size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when we are a foreground app instead of background.
  double foreground_heap_growth_multiplier_;

  // The amount of native memory allocation since the last GC required to cause us to wait for a
  // collection as a result of native allocation. Very large values can cause the device to run
  // out of memory, due to lack of finalization to reclaim native memory. Making it too small can
  // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
  // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
  const size_t stop_for_native_allocs_;

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // The current state of heap verification, may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;

  // Compacting GC disable count, prevents compacting GC from running iff > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_;
  Atomic<collector::ConcurrentCopying*> active_concurrent_copying_collector_;
  collector::ConcurrentCopying* young_concurrent_copying_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool is_running_on_memory_tool_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimal interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // Saved OOMs by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count for requested homogeneous space compaction.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count for ignored homogeneous space compaction.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count for performed homogeneous space compaction.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // The number of garbage collections (either young or full, not trims or the like) we have
  // completed since heap creation. We include requests that turned out to be impossible
  // because they were disabled. We guard against wrapping, though that's unlikely.
  // Increment is guarded by gc_complete_lock_.
  Atomic<uint32_t> gcs_completed_;

  // The number of the last garbage collection that has been requested. A value of gcs_completed_
  // + 1 indicates that another collection is needed or in progress. A value of gcs_completed_ or
  // (logically) less means that no new GC has been requested.
  Atomic<uint32_t> max_gc_requested_;

  // Active tasks which we can modify (change target time, desired collector type, etc.).
  CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
  HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Set in Heap constructor.
  const bool use_generational_cc_;

  // True if the currently running collection has made some thread wait.
  bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
  // The number of blocking GC runs.
  uint64_t blocking_gc_count_;
  // The total duration of blocking GC runs.
  uint64_t blocking_gc_time_;
  // The duration of the window for the GC count rate histograms.
  static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
  // Maximum number of missed histogram windows for which statistics will be collected.
  static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
  // The last time when the GC count rate histograms were updated.
  // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
  uint64_t last_update_time_gc_count_rate_histograms_;
  // The running count of GC runs in the last window.
  uint64_t gc_count_last_window_;
  // The running count of blocking GC runs in the last window.
  uint64_t blocking_gc_count_last_window_;
  // The maximum number of buckets in the GC count rate histograms.
  static constexpr size_t kGcCountRateMaxBucketCount = 200;
  // The histogram of the number of GC invocations per window duration.
  Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
  // The histogram of the number of blocking GC invocations per window duration.
  Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);

  // Allocation tracking support.
  Atomic<bool> alloc_tracking_enabled_;
  std::unique_ptr<AllocRecordObjectMap> allocation_records_;
  size_t alloc_record_depth_;

  // Perfetto Java Heap Profiler support.
  HeapSampler heap_sampler_;

  // GC stress related data structures.
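  // Paraphrased sketch of how the fields below are used by CheckGcStressMode() (the authoritative
  // logic is in heap.cc; the backtrace-hashing helper named here is hypothetical):
  //
  //   uint64_t hash = HashCurrentAllocationBacktrace();  // hypothetical helper
  //   MutexLock mu(self, *backtrace_lock_);
  //   if (seen_backtraces_.insert(hash).second) {
  //     unique_backtrace_count_.fetch_add(1);  // new backtrace: force a GC for it
  //   } else {
  //     seen_backtrace_count_.fetch_add(1);    // already seen: no extra GC
  //   }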
  Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Debugging variables, seen backtraces vs unique backtraces.
  Atomic<uint64_t> seen_backtrace_count_;
  Atomic<uint64_t> unique_backtrace_count_;
  // Stack trace hashes that we already saw.
  std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);

  // We disable GC when we are shutting down the runtime in case there are daemon threads still
  // allocating.
  bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);

  // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
  // emit region info before and after each GC cycle.
  bool dump_region_info_before_gc_;
  bool dump_region_info_after_gc_;

  // Boot image spaces.
  std::vector<space::ImageSpace*> boot_image_spaces_;

  // Boot image address range. Includes images and oat files.
  uint32_t boot_images_start_address_;
  uint32_t boot_images_size_;

  // The number of times we initiated a GC of last resort to try to avoid an OOME.
  Atomic<uint64_t> pre_oome_gc_count_;

  // An installed allocation listener.
  Atomic<AllocationListener*> alloc_listener_;
  // An installed GC pause listener.
  Atomic<GcPauseListener*> gc_pause_listener_;

  std::unique_ptr<Verification> verification_;

  friend class CollectorTransitionTask;
  friend class collector::GarbageCollector;
  friend class collector::ConcurrentCopying;
  friend class collector::MarkCompact;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class GCCriticalSection;
  friend class ReferenceQueue;
  friend class ScopedGCCriticalSection;
  friend class ScopedInterruptibleGCCriticalSection;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_