/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "base/macros.h"
#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace art HIDDEN {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
using ObjectStack = AtomicStack<mirror::Object>;
template <size_t kAlignment> class SpaceBitmap;
using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);
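  // Rough phase sequencing, sketched as an assumption from the declarations
  // above (RunPhases() in concurrent_copying.cc is the authoritative driver):
  //   InitializePhase() -> flip of thread roots + MarkingPhase()
  //       -> CopyingPhase() -> ReclaimPhase() -> FinishPhase()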
  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent
  // graying creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);

  // Blindly return the forwarding pointer from the lockword, or null if there is none.
  static mirror::Object* GetFwdPtrUnchecked(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If marked, return the to-space object, otherwise null.
  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
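  // Illustrative (hypothetical) mutator-side use of the marking API above; the
  // real callers are the read barrier in read_barrier-inl.h and the read
  // barrier mark entrypoints:
  //
  //   ConcurrentCopying* cc = Runtime::Current()->GetHeap()->ConcurrentCopyingCollector();
  //   if (cc->IsMarking()) {
  //     ref = cc->MarkFromReadBarrier(ref);  // Returns the to-space reference.
  //   }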
 private:
  EXPORT void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  // Returns a to-space copy of the from-space object from_ref, and atomically installs a
  // forwarding pointer. Ensures that the forwarding reference is visible to other threads before
  // the returned to-space pointer becomes visible to them.
  EXPORT mirror::Object* Copy(Thread* const self,
                              mirror::Object* from_ref,
                              mirror::Object* holder,
                              MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref, size_t obj_size = 0) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object 'obj' in the dirty cards during
  // card-table scan. In addition to visiting the references, it also sets the
  // read-barrier state to gray for Reference-type objects to ensure that
  // GetReferent() called on these objects calls the read-barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
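  // Illustrative (hypothetical) invocation from the GC thread's drain loop;
  // the real call sites are in ProcessMarkStackOnce() in concurrent_copying.cc:
  //
  //   ProcessThreadLocalMarkStacks(/*disable_weak_ref_access=*/ false,
  //                                /*checkpoint_callback=*/ nullptr,
  //                                [this](mirror::Object* ref)
  //                                    REQUIRES_SHARED(Locks::mutator_lock_) {
  //                                  ProcessMarkStackRef(ref);
  //                                });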
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithFakeObject(Thread* const self, mirror::Object* fake_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
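  // Lost-copy handling, sketched as an assumption from the two helpers above
  // and the skipped_blocks_map_ comments further down (Copy() in
  // concurrent_copying.cc has the authoritative logic): if the
  // forwarding-pointer CAS is lost to another thread, the speculative to-space
  // copy is overwritten with a fake object and its block is remembered for
  // possible reuse, roughly:
  //
  //   if (lost_forwarding_cas) {  // hypothetical condition name
  //     FillWithFakeObject(self, to_ref, bytes_allocated);
  //     // Record the block in skipped_blocks_map_ so AllocateInSkippedBlock()
  //     // can hand it out again if the region space runs low.
  //   }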
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Return the forwarding pointer from the lockword. The argument must be in from space.
  mirror::Object* GetFwdPtr(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  EXPORT std::string DumpHeapReference(mirror::Object* obj,
                                       MemberOffset offset,
                                       mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  EXPORT mirror::Object* MarkNonMoving(Thread* const self,
                                       mirror::Object* from_ref,
                                       mirror::Object* holder = nullptr,
                                       MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(
      Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
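  // Dispatch sketch for Mark(), summarizing the helpers above as an assumption
  // (the authoritative logic lives in concurrent_copying-inl.h): from-space
  // region references are forwarded via Copy(), references in unevacuated
  // from-space regions via MarkUnevacFromSpaceRegion(), immune-space references
  // via MarkImmuneSpace(), and everything else (non-moving and large-object
  // spaces) via MarkNonMoving().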
  EXPORT mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                             mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in the Heap constructor.
  const bool use_generational_cc_;

  // Generational "sticky", only trace through dirty objects in region space.
  const bool young_gen_;

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see the use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;

  // Guards access to the pooled_mark_stacks_ and revoked_mark_stacks_ vectors.
  // Also guards destruction and revocation of thread-local mark stacks, i.e.
  // clearing a thread-local mark stack (by other threads or during destruction)
  // must hold this lock.
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  // Size of a thread-local mark stack.
  static size_t GetMarkStackSize() {
    return gPageSize;
  }
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
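  // Ownership sketch for the pooled/revoked stacks above (an assumption drawn
  // from these members and the MarkStackMode comments below): in the
  // thread-local mode, a mutator takes a free stack from pooled_mark_stacks_
  // under mark_stack_lock_, caches it in its Thread, and pushes references to
  // it without locking; the GC later revokes those stacks into
  // revoked_mark_stacks_ at a checkpoint and drains them on the GC thread.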
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_bytes_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck.
  Atomic<int> is_mark_stack_push_disallowed_;   // Debug only.
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  // mark_stack_mode_ is updated asynchronously by the GC. We cannot assume that another thread
  // has seen it until it has run some kind of checkpoint. We generally access this using
  // acquire/release ordering, to ensure that any relevant prior changes are visible to readers of
  // the flag, and to ensure that CHECKs prior to a state change cannot be delayed past the state
  // change.
  Atomic<MarkStackMode> mark_stack_mode_;
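  // Typical mode transitions over one GC cycle, sketched as an assumption from
  // the enum above and from SwitchToSharedMarkStackMode() /
  // SwitchToGcExclusiveMarkStackMode() declared earlier:
  //   kMarkStackModeOff -> kMarkStackModeThreadLocal (concurrent marking)
  //       -> kMarkStackModeShared (while disabling weak-ref access / draining)
  //       -> kMarkStackModeGcExclusive (final drain on the GC thread)
  //       -> kMarkStackModeOff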
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than the mutators do. Therefore, we separate the two to avoid CAS.
  // bytes_moved_ and bytes_moved_gc_thread_ are critical for GC triggering; the
  // others are just informative.
  Atomic<size_t> bytes_moved_;    // Used by mutators.
  Atomic<size_t> objects_moved_;  // Used by mutators.

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning
  // applies to gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied live byte ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs, used to calculate the average above. (It doesn't
  // include GCs where from_bytes is zero, i.e. where the from-space is empty,
  // which is possible for a minor GC if all allocated objects are in the
  // non-moving space.)
  size_t gc_count_;
  // A bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of a two-phase full-heap GC cycle.
  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes / num_allocated_bytes per GC cycle.
  float reclaimed_bytes_ratio_sum_;

  // Used only by the GC thread, so they need not be atomic. Also, they should
  // be kept in a different cacheline than bytes_moved_/objects_moved_ (above)
  // to avoid false cacheline sharing.
  size_t bytes_moved_gc_thread_;
  size_t objects_moved_gc_thread_;
  uint64_t bytes_scanned_;
  uint64_t cumulative_bytes_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that went unused due to lost races (CAS failures) when installing
  // the object copy/forwarding pointer. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be directly
  // used without going through a GC cycle like other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and also log the results.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_time_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
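  // Accounting sketch for the read-barrier slow-path counters above (an
  // assumption; MarkFromReadBarrierWithMeasurements() and FinishPhase() hold
  // the actual flow): the per-GC atomics rb_slow_path_ns_ and
  // rb_slow_path_count_ are accumulated lock-free on the slow path and later
  // folded into rb_slow_path_time_histogram_ and the *_total_ counters while
  // holding rb_slow_path_histogram_lock_.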
  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithFakeObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_