/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace gc {

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object> ObjectStack;
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
  class ReadBarrierTable;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  explicit ConcurrentCopying(Heap* heap,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  virtual void RunPhases() OVERRIDE
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
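  // Barrier used to synchronize with mutator threads running checkpoints (see GetBarrier()).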
  std::unique_ptr<Barrier> gc_barrier_;
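  // The GC's own mark stack (see MarkStackMode below for how access to it is shared).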
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  bool rb_mark_bit_stack_full_;
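  // Objects that were grayed unnecessarily ("false gray"); drained by ProcessFalseGrayStack().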
  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
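  // Thread-local mark stacks revoked from mutator threads, waiting to be processed by the GC.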
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
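  // Pool of pre-allocated mark stacks (at most kMarkStackPoolSize) handed out to mutator threads
  // as thread-local mark stacks.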
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
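  // The thread running this collection.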
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
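  // Size of the live stack recorded by RecordLiveStackFreezeSize().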
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects left unused due to lost races (CAS failures) when installing
  // the forwarding pointer during object copy. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and also log the results.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

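  // Read barrier table; used by the table-lookup read barrier configuration.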
  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantObjectVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsObjectVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_