/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "offsets.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object> ObjectStack;
  template <size_t kAlignment> class SpaceBitmap;
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
  class ReadBarrierTable;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

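// Concurrent copying (CC) collector. Evacuates live objects from from-space regions of the region
// space into to-space regions concurrently with the mutators; a read barrier keeps mutators from
// observing from-space references while marking is in progress.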
class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  explicit ConcurrentCopying(Heap* heap,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  virtual void RunPhases() OVERRIDE
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
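  // Mark from_ref and return the corresponding to-space reference, copying the object into a
  // to-space region if it has not been copied yet. holder/offset identify the referring field and
  // are used for diagnostics.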
  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
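  // Slow path of the read barrier: mark from_ref and return the to-space reference.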
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
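  // Push obj onto the mark stack selected by the current mark stack mode (thread-local, shared, or
  // GC-exclusive).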
  void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
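  // Copy from_ref into a to-space region and race to install the forwarding pointer; copies that
  // lose the race become skipped blocks (see skipped_blocks_map_).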
  mirror::Object* Copy(mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
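  // Scan the reference fields of to_ref, marking (and if needed copying) the referents.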
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
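  // Gray immune-space objects that sit on dirty cards so that their references are rescanned
  // (see kGrayDirtyImmuneObjects).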
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
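  // Reuse a previously skipped block of at least alloc_size bytes, if one is available
  // (see skipped_blocks_map_).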
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
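  // Return the to-space forwarding pointer that has been installed in from_ref, if any.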
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
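  // Pause the mutators and flip thread roots from from-space to to-space references.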
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
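  // Mark an object that lives outside the region space (e.g. in the non-moving or large object
  // space); such objects are marked in place rather than copied.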
  mirror::Object* MarkNonMoving(mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  bool rb_mark_bit_stack_full_;
  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that went unused due to lost races (CAS failures) when
  // installing the forwarding pointer during object copy. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillDummyObject can run. Not an
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_