/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <atomic>
#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack contains at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences, since we may end up making many calls to
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;
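
// Illustrative sketch (not part of the original source): the two options above
// are meant to gate parallel draining of the mark stack roughly as follows,
// assuming a helper such as ProcessMarkStackParallel(thread_count) exists:
//
//   if (kParallelProcessMarkStack &&
//       thread_count > 1 &&
//       mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
//     ProcessMarkStackParallel(thread_count);  // Fan work out to the thread pool.
//   } else {
//     // Drain the stack on this thread; small stacks are not worth the
//     // task-allocation overhead described above.
//   }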

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      immune_spaces_.AddSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr),
      mark_bitmap_(nullptr),
      mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent),
      live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  no_reference_class_count_.StoreRelaxed(0);
  normal_count_.StoreRelaxed(0);
  class_count_.StoreRelaxed(0);
  object_array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for a non-sticky collection.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true,
      GetTimings(),
      GetCurrentIteration()->GetClearSoftReferences(),
      this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if this is a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required if we are doing a concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks();
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
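    //
    // Illustrative sketch (not part of the original source) of the mutator side
    // of the race above, for a reference write obj->field_ = value:
    //
    //   obj->field_ = value;        // the reference write (step 4) and
    //   card_table->MarkCard(obj);  // its write barrier (step 1)
    //
    // may be observed by the GC in either order; between steps 1 and 4 the GC
    // can age the card and scan the object (steps 2 and 3) without seeing
    // `value`. The checkpoint's lock acquire/release makes both stores visible
    // before any later card scan.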
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards of the
  // alloc space instead of aging them.
  //
  // Note that it is fine to clear the cards of the alloc space here,
  // in the case of a concurrent (non-sticky) mark-sweep GC (whose
  // marking phase _is_ performed concurrently with mutator threads
  // running and possibly dirtying cards), as the whole alloc space
  // will be traced in that case, starting *after* this call to
  // Heap::ProcessCards (see calls to MarkSweep::MarkRoots and
  // MarkSweep::MarkReachableObjects). References held by objects on
  // cards that became dirty *after* the actual marking work started
  // will be marked in the pause (see MarkSweep::PausePhase), in a
  // *non-concurrent* way to prevent races with mutator threads.
  //
  // TODO: Do we need some sort of fence between the call to
  // Heap::ProcessCards and the calls to MarkSweep::MarkRoots /
  // MarkSweep::MarkReachableObjects below to make sure write
  // operations in the card table clearing the alloc space's dirty
  // cards (during the call to Heap::ProcessCards) are not reordered
  // *after* marking actually starts?
  heap_->ProcessCards(GetTimings(),
                      /* use_rem_sets */ false,
                      /* process_alloc_space_cards */ true,
                      /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky);
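  // Illustrative note (not part of the original source): if the fence asked
  // about in the TODO above turned out to be necessary, it would conceptually
  // be a
  //   std::atomic_thread_fence(std::memory_order_seq_cst);
  // issued here, between Heap::ProcessCards and the MarkRoots /
  // MarkReachableObjects calls below, to keep the card-clearing stores from
  // being reordered after marking starts.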
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

class MarkSweep::ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(ObjPtr<mirror::Object> obj) const
      ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj.Ptr());
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : immune_spaces_.GetSpaces()) {
    const char* name = space->IsZygoteSpace()
        ? "UpdateAndMarkZygoteModUnionTable"
        : "UpdateAndMarkImageModUnionTable";
    DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
    TimingLogger::ScopedTiming t(name, GetTimings());
    accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
    if (mod_union_table != nullptr) {
      mod_union_table->UpdateAndMarkReferences(this);
    } else {
      // No mod-union table, scan all the live bits. This can only occur for app images.
      space->GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                               reinterpret_cast<uintptr_t>(space->End()),
                                               ScanObjectVisitor(this));
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* const self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime* const runtime = Runtime::Current();
  runtime->AllowNewSystemWeaks();
  // Clean up class loaders after system weaks are swept since that is how we know if class
  // unloading occurred.
  runtime->GetClassLinker()->CleanupClassLoaders();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}
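
// Illustrative note (not part of the original source): the SwapBitmaps call
// above relies on the mark bitmap holding exactly the set of live objects once
// the sweep is done, so conceptually
//
//   std::swap(live_bitmap_, mark_bitmap_);
//
// promotes it to the new live bitmap, and the stale live bitmap is reused as
// the next GC's mark bitmap instead of having its bits cleared during the
// sweep.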

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early, since it will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = ref->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  return IsMarked(obj);
}

class MarkSweep::MarkObjectSlowPath {
 public:
  explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
                              mirror::Object* holder = nullptr,
                              MemberOffset offset = MemberOffset(0))
      : mark_sweep_(mark_sweep),
        holder_(holder),
        offset_(offset) {}

  void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                     !large_object_space->Contains(obj)))) {
      // Lowest priority logging first:
      PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
      // Buffer the output in the string stream since it is more important than the stack traces
      // and we want it to have log priority. The stack traces are printed from Runtime::Abort
      // which is called from LOG(FATAL) but before the abort message.
      std::ostringstream oss;
      oss << "Tried to mark " << obj << " not contained by any spaces" << std::endl;
      if (holder_ != nullptr) {
        size_t holder_size = holder_->SizeOf();
        ArtField* field = holder_->FindFieldByOffset(offset_);
        oss << "Field info: "
            << " holder=" << holder_
            << " holder is "
            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
                ? "alive" : "dead")
            << " holder_size=" << holder_size
            << " holder_type=" << holder_->PrettyTypeOf()
            << " offset=" << offset_.Uint32Value()
            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
            << " field_type="
            << (field != nullptr ? field->GetTypeDescriptor() : "")
            << " first_ref_field_offset="
            << (holder_->IsClass()
                ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
                    kRuntimePointerSize)
                : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
            << " num_of_ref_fields="
            << (holder_->IsClass()
                ? holder_->AsClass()->NumReferenceStaticFields()
                : holder_->GetClass()->NumReferenceInstanceFields())
            << std::endl;
        // Print the memory content of the holder.
        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
          oss << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = " << std::hex << p[i]
              << std::endl;
        }
      }
      oss << "Attempting to see if it's a bad thread root" << std::endl;
      mark_sweep_->VerifySuspendedThreadRoots(oss);
      LOG(FATAL) << oss.str();
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  mirror::Object* const holder_;
  MemberOffset offset_;
};

inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
                                         mirror::Object* holder,
                                         MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (kUseBakerReadBarrier) {
    // Verify all the objects have the correct state installed.
    obj->AssertReadBarrierState();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkObjectSlowPath visitor(this, holder, offset);
    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
    // will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // The lock is not needed, but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerReadBarrier) {
    // Verify all the objects have the correct state installed.
    obj->AssertReadBarrierState();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  }
  // Try to take advantage of locality of references within a space; failing that, find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(mirror::Object* obj,
                                  mirror::Object* holder,
                                  MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        os_ << "Found invalid root: " << root << " " << info << std::endl;
      }
    }
  }

 private:
  std::ostream& os_;
};

void MarkSweep::VerifySuspendedThreadRoots(std::ostream& os) {
  VerifyRootVisitor visitor(os);
  Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(this, flags);
}

class MarkSweep::DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkSweep::MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool,
                MarkSweep* mark_sweep,
                size_t mark_stack_size,
                StackReference<mirror::Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                            MarkSweep* mark_sweep)
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    ALWAYS_INLINE void operator()(mirror::Object* obj,
                                  MemberOffset offset,
                                  bool is_static ATTRIBUTE_UNUSED) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      Mark(obj->GetFieldObject<mirror::Object>(offset));
    }

    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!root->IsNull()) {
        VisitRoot(root);
      }
    }

    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (kCheckLocks) {
        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
        Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
      }
      Mark(root->AsMirrorPtr());
    }

   private:
    ALWAYS_INLINE void Mark(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(mirror::Object* obj) const
        REQUIRES(Locks::heap_bitmap_lock_)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  StackReference<mirror::Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow; give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_,
                                     mark_sweep_,
                                     kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self ATTRIBUTE_UNUSED)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class MarkSweep::CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool,
               MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin,
               uint8_t* end,
               uint8_t minimum_age,
               size_t mark_stack_size,
               StackReference<mirror::Object>* mark_stack_obj,
               bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age),
        clear_card_(clear_card) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_
        ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
        : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  // Use fewer threads if we are in a background state (non-jank-perceptible) since we want to
  // leave more CPU time for the foreground apps.
  if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) {
    return 1;
  }
  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}

void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
        GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // If paused and the space is neither a zygote nor an image space, we can clear the dirty
      // cards to avoid accumulating them, which would increase the card scanning load in the
      // following GC cycles. We need to keep the dirty cards of the image and zygote spaces in
      // order to track references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool,
                                      this,
                                      space->GetMarkBitmap(),
                                      card_begin,
                                      card_begin + card_increment,
                                      minimum_age,
                                      mark_stack_increment,
                                      mark_stack_end,
                                      clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
        case space::kGcRetentionPolicyNeverCollect:
          name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
          break;
        case space::kGcRetentionPolicyFullCollect:
          name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
          break;
        case space::kGcRetentionPolicyAlwaysCollect:
          name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
          break;
        default:
          LOG(FATAL) << "Unreachable";
          UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(),
                                 space->Begin(),
                                 space->End(),
                                 visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(),
                                  space->Begin(),
                                  space->End(),
                                  visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool,
                    MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap,
                    uintptr_t begin,
                    uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool,
                                               this,
                                               current_space_bitmap_,
                                               start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    Runtime::Current()->VisitRoots(&visitor);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
 public:
  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual mirror::Object* IsMarked(mirror::Object* obj)
      OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    mark_sweep_->VerifyIsLive(obj);
    return obj;
  }

  MarkSweep* const mark_sweep_;
};

void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    // TODO: Consider live stack? Has this code bitrotted?
    CHECK(!heap_->allocation_stack_->Contains(obj))
        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks, uses a special object visitor which returns the input object.
  VerifySystemWeakVisitor visitor(this);
  Runtime::Current()->SweepSystemWeaks(&visitor);
}

class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor {
 public:
  CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                            bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
    }
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ScopedTrace trace("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* const self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(this, kVisitRootFlagAllRoots);
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks, then wait for all mutator threads to pass the barrier.
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the locks.
  if (barrier_count == 0) {
    return;
  }
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}
1205 
SweepArray(accounting::ObjectStack * allocations,bool swap_bitmaps)1206 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1207   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1208   Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<mirror::Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last, as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() &&
        !immune_spaces_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant number of non-moving objects, so sweep the
  // non-moving space after the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
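    // Walk the array: queue unmarked objects belonging to this space for batched freeing, and
    // compact everything else to the front so later spaces scan a shorter array.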
    StackReference<mirror::Object>* out = objects;
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space; remove it from the array and add it to the free buffer
        // if it is unmarked.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        (out++)->Assign(obj);
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
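    // Objects still left in the array were not claimed by any continuous space, so test them
    // directly against the large object mark bitmap.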
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items into the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
          GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}

class MarkVisitor {
 public:
  ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj,
                                MemberOffset offset,
                                bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(root->AsMirrorPtr());
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(mirror::Object* obj) {
  MarkVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

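// Drain the mark stack in parallel by packaging contiguous chunks of it as tasks for the
// heap's thread pool.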
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
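  // Give each task a roughly even share of the mark stack, capped by the fixed capacity of a
  // MarkStackTask; the + 1 rounds up so every element is covered.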
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
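  // Note: thread_count includes the current thread, which also processes tasks inside Wait()
  // below, so only thread_count - 1 pool workers are needed.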
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
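        // Refill a small FIFO from the mark stack, issuing a prefetch for each popped object so
        // that its memory is likely in cache by the time it is scanned a few iterations later.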
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

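// Returns the object itself if it is marked (objects in immune regions count as always marked),
// or null if it is unmarked.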
inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
  if (immune_spaces_.IsInImmuneRegion(object)) {
    return object;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object) ? object : nullptr;
  }
  return mark_bitmap_->Test(object) ? object : nullptr;
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc)
        << "MarkSweep scanned"
        << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
        << " normal objects=" << normal_count_.LoadRelaxed()
        << " classes=" << class_count_.LoadRelaxed()
        << " object arrays=" << object_array_count_.LoadRelaxed()
        << " references=" << reference_count_.LoadRelaxed()
        << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
        << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
        << " immune=" << mark_immune_count_.LoadRelaxed()
        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  Thread* const self = Thread::Current();
  ReaderMutexLock mu(self, *Locks::mutator_lock_);
  WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
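  // Clear the mark state of all spaces so the next collection starts from a clean slate.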
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art