/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace art {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
using ObjectStack = AtomicStack<mirror::Object>;
template <size_t kAlignment> class SpaceBitmap;
using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
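  // Informal note: with generational CC enabled, a young-generation (minor)
  // collection is reported as a sticky GC; every other CC collection is
  // reported as a partial GC (see the body below).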
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
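  // Informal usage sketch (not the authoritative call site; `self`, `from_ref`,
  // `holder` and `offset` stand for the caller's values):
  //
  //   mirror::Object* to_ref =
  //       Mark</*kGrayImmuneObject=*/true>(self, from_ref, holder, offset);
  //   // `to_ref` is the to-space address of `from_ref`; it equals `from_ref`
  //   // when the object did not need to be copied.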
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);

  // Blindly return the forwarding pointer from the lockword, or null if there is none.
  static mirror::Object* GetFwdPtrUnchecked(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
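  // Informal sketch of where the forwarding pointer lives (an assumption based on
  // ART's LockWord forwarding-address state, not restated from this header):
  //
  //   LockWord lw = from_ref->GetLockWord(false);
  //   if (lw.GetState() == LockWord::kForwardingAddress) {
  //     return reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
  //   }
  //   return nullptr;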

  // If marked, return the to-space object, otherwise null.
  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Returns a to-space copy of the from-space object from_ref, and atomically installs a
  // forwarding pointer. Ensures that the forwarding reference is visible to other threads before
  // the returned to-space pointer becomes visible to them.
  mirror::Object* Copy(Thread* const self,
                       mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
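  // Informal note on races: several threads may copy the same from-space object
  // concurrently, and only the copy whose forwarding-pointer CAS wins is kept. A
  // losing copy is handed back as a reusable block (see LostCopyVisitor,
  // FillWithFakeObject and skipped_blocks_map_ below). This summarizes the
  // declarations in this header; the authoritative logic is in concurrent_copying.cc.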
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref, size_t obj_size = 0) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object 'obj' in the dirty cards during
  // card-table scan. In addition to visiting the references, it also sets the
  // read-barrier state to gray for Reference-type objects to ensure that
  // GetReferent() called on these objects calls the read-barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
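  // Informal note: the split above suggests that a full CC collection sweeps the
  // allocation spaces via their bitmaps (Sweep), while a sticky-bit (young-gen)
  // collection only walks the recorded allocation stack (SweepArray). This is a
  // reading of the comments above; see ReclaimPhase in concurrent_copying.cc for
  // the authoritative dispatch.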
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithFakeObject(Thread* const self, mirror::Object* fake_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Return the forwarding pointer from the lockword. The argument must be in from space.
  mirror::Object* GetFwdPtr(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(Thread* const self,
                                mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                      mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in Heap constructor.
  const bool use_generational_cc_;

  // Generational "sticky", only trace through dirty objects in region space.
  const bool young_gen_;

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
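  // Informal sketch of the intended interplay (an assumption about the slow path,
  // not restated from this header; see MarkFromReadBarrier in concurrent_copying.cc):
  //
  //   if (!rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     rb_mark_bit_stack_full_ = true;  // stack is full; stop recording mark bits
  //   }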

  // Guards access to pooled_mark_stacks_ and revoked_mark_stacks_ vectors.
  // Also guards destruction and revocations of thread-local mark-stacks.
  // Clearing thread-local mark-stack (by other threads or during destruction)
  // should be guarded by it.
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  size_t from_space_num_bytes_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
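  // Informal note on the expected mode progression over one collection (a reading
  // of the declarations above, not a guarantee): thread-local while mutators run
  // concurrently with marking, then shared (SwitchToSharedMarkStackMode) during the
  // transition checkpoints, then GC-exclusive (SwitchToGcExclusiveMarkStackMode)
  // for the final drain, and finally off.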
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than mutators.  Therefore, we separate the two to avoid CAS.  Bytes_moved_ and
  // bytes_moved_gc_thread_ are critical for GC triggering; the others are just informative.
  Atomic<size_t> bytes_moved_;  // Used by mutators
  Atomic<size_t> objects_moved_;  // Used by mutators

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning goes
  // for gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied live bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs, used to calculate the average ratio above. (It doesn't
  // include GCs where from_bytes is zero, i.e. the from-space is empty, which is
  // possible for a minor GC if all allocated objects are in the non-moving
  // space.)
  size_t gc_count_;
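  // For reference, a sketch of the implied computation (the reported average
  // lives in DumpPerformanceInfo in concurrent_copying.cc):
  //
  //   float average_copied_live_bytes_ratio = copied_live_bytes_ratio_sum_ / gc_count_;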
  // Bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of a two-phase full-heap GC cycle.
  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
  float reclaimed_bytes_ratio_sum_;

  // Used only by the GC thread, so need not be atomic. Also, should be kept
  // in a different cacheline than bytes/objects_moved_ (above) to avoid false
  // cacheline sharing.
  size_t bytes_moved_gc_thread_;
  size_t objects_moved_gc_thread_;
  uint64_t bytes_scanned_;
  uint64_t cumulative_bytes_moved_;
  uint64_t cumulative_objects_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that were unused due to lost races (CAS failures) at
  // object copy/forward pointer install. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be directly
  // used without going through a GC cycle like other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
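  // Informal sketch of how a skipped block is recycled (placeholder logic; the
  // real lookup lives in AllocateInSkippedBlock in concurrent_copying.cc):
  //
  //   // Find the smallest recorded block that can hold `alloc_size` bytes.
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);
  //   if (it == skipped_blocks_map_.end()) {
  //     return nullptr;  // nothing large enough was skipped
  //   }
  //   uint8_t* addr = it->second;
  //   skipped_blocks_map_.erase(it);
  //   // Any unused tail of the block is re-registered or filled with a fake object.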
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we measure the time spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_time_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithFakeObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_