1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mark_sweep.h"
18 
19 #include <functional>
20 #include <numeric>
21 #include <climits>
22 #include <vector>
23 
24 #include "base/bounded_fifo.h"
25 #include "base/logging.h"
26 #include "base/macros.h"
27 #include "base/mutex-inl.h"
28 #include "base/timing_logger.h"
29 #include "gc/accounting/card_table-inl.h"
30 #include "gc/accounting/heap_bitmap.h"
31 #include "gc/accounting/space_bitmap-inl.h"
32 #include "gc/heap.h"
33 #include "gc/space/image_space.h"
34 #include "gc/space/large_object_space.h"
35 #include "gc/space/space-inl.h"
36 #include "indirect_reference_table.h"
37 #include "intern_table.h"
38 #include "jni_internal.h"
39 #include "monitor.h"
40 #include "mark_sweep-inl.h"
41 #include "mirror/art_field.h"
42 #include "mirror/art_field-inl.h"
43 #include "mirror/class-inl.h"
44 #include "mirror/class_loader.h"
45 #include "mirror/dex_cache.h"
46 #include "mirror/object-inl.h"
47 #include "mirror/object_array.h"
48 #include "mirror/object_array-inl.h"
49 #include "runtime.h"
50 #include "thread-inl.h"
51 #include "thread_list.h"
52 #include "verifier/method_verifier.h"
53 
54 using ::art::mirror::ArtField;
55 using ::art::mirror::Class;
56 using ::art::mirror::Object;
57 using ::art::mirror::ObjectArray;
58 
59 namespace art {
60 namespace gc {
61 namespace collector {
62 
63 // Performance options.
64 constexpr bool kUseRecursiveMark = false;
65 constexpr bool kUseMarkStackPrefetch = true;
66 constexpr size_t kSweepArrayChunkFreeSize = 1024;
67 
68 // Parallelism options.
69 constexpr bool kParallelCardScan = true;
70 constexpr bool kParallelRecursiveMark = true;
71 // Don't attempt to parallelize mark stack processing unless the mark stack is at least n
72 // elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
73 // having this can add overhead in ProcessReferences since we may end up doing many calls of
74 // ProcessMarkStack with very small mark stacks.
75 constexpr size_t kMinimumParallelMarkStackSize = 128;
76 constexpr bool kParallelProcessMarkStack = true;
77 
78 // Profiling and information flags.
79 constexpr bool kCountClassesMarked = false;
80 constexpr bool kProfileLargeObjects = false;
81 constexpr bool kMeasureOverhead = false;
82 constexpr bool kCountTasks = false;
83 constexpr bool kCountJavaLangRefs = false;
84 
85 // Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
86 constexpr bool kCheckLocks = kDebugLocking;
87 
88 void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
89   // Bind live to mark bitmap if necessary.
90   if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
91     BindLiveToMarkBitmap(space);
92   }
93 
94   // Add the space to the immune region.
95   if (immune_begin_ == NULL) {
96     DCHECK(immune_end_ == NULL);
97     SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
98                    reinterpret_cast<Object*>(space->End()));
99   } else {
100     const space::ContinuousSpace* prev_space = nullptr;
101     // Find out if the previous space is immune.
102     for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
103       if (cur_space == space) {
104         break;
105       }
106       prev_space = cur_space;
107     }
108     // If previous space was immune, then extend the immune region. Relies on continuous spaces
109     // being sorted by Heap::AddContinuousSpace.
110     if (prev_space != NULL &&
111         immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
112         immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
113       immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
114       immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
115     }
116   }
117 }
118 
119 void MarkSweep::BindBitmaps() {
120   timings_.StartSplit("BindBitmaps");
121   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
122   // Mark all of the spaces we never collect as immune.
123   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
124     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
125       ImmuneSpace(space);
126     }
127   }
128   timings_.EndSplit();
129 }
130 
131 MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
132     : GarbageCollector(heap,
133                        name_prefix + (name_prefix.empty() ? "" : " ") +
134                        (is_concurrent ? "concurrent mark sweep": "mark sweep")),
135       current_mark_bitmap_(NULL),
136       java_lang_Class_(NULL),
137       mark_stack_(NULL),
138       immune_begin_(NULL),
139       immune_end_(NULL),
140       soft_reference_list_(NULL),
141       weak_reference_list_(NULL),
142       finalizer_reference_list_(NULL),
143       phantom_reference_list_(NULL),
144       cleared_reference_list_(NULL),
145       gc_barrier_(new Barrier(0)),
146       large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
147       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
148       is_concurrent_(is_concurrent),
149       clear_soft_references_(false) {
150 }
151 
152 void MarkSweep::InitializePhase() {
153   timings_.Reset();
154   base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
155   mark_stack_ = heap_->mark_stack_.get();
156   DCHECK(mark_stack_ != nullptr);
157   SetImmuneRange(nullptr, nullptr);
158   soft_reference_list_ = nullptr;
159   weak_reference_list_ = nullptr;
160   finalizer_reference_list_ = nullptr;
161   phantom_reference_list_ = nullptr;
162   cleared_reference_list_ = nullptr;
163   freed_bytes_ = 0;
164   freed_large_object_bytes_ = 0;
165   freed_objects_ = 0;
166   freed_large_objects_ = 0;
167   class_count_ = 0;
168   array_count_ = 0;
169   other_count_ = 0;
170   large_object_test_ = 0;
171   large_object_mark_ = 0;
172   classes_marked_ = 0;
173   overhead_time_ = 0;
174   work_chunks_created_ = 0;
175   work_chunks_deleted_ = 0;
176   reference_count_ = 0;
177   java_lang_Class_ = Class::GetJavaLangClass();
178   CHECK(java_lang_Class_ != nullptr);
179 
180   FindDefaultMarkBitmap();
181 
182   // Do any pre GC verification.
183   timings_.NewSplit("PreGcVerification");
184   heap_->PreGcVerification(this);
185 }
186 
187 void MarkSweep::ProcessReferences(Thread* self) {
188   base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
189   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
190   ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
191                     &finalizer_reference_list_, &phantom_reference_list_);
192 }
193 
194 bool MarkSweep::HandleDirtyObjectsPhase() {
195   base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
196   Thread* self = Thread::Current();
197   Locks::mutator_lock_->AssertExclusiveHeld(self);
198 
199   {
200     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
201 
202     // Re-mark root set.
203     ReMarkRoots();
204 
205     // Scan dirty objects, this is only required if we are not doing concurrent GC.
206     RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
207   }
208 
209   ProcessReferences(self);
210 
211   // Only need to do this if we have the card mark verification on, and only during concurrent GC.
212   if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
213       GetHeap()->verify_post_gc_heap_) {
214     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
215     // This second sweep makes sure that we don't have any objects in the live stack which point to
216     // freed objects. These cause problems since their references may be previously freed objects.
217     SweepArray(GetHeap()->allocation_stack_.get(), false);
218   }
219 
220   timings_.StartSplit("PreSweepingGcVerification");
221   heap_->PreSweepingGcVerification(this);
222   timings_.EndSplit();
223 
224   // Ensure that nobody inserted items in the live stack after we swapped the stacks.
225   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
226   CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
227 
228   // Disallow new system weaks to prevent a race which occurs when someone adds a new system
229   // weak before we sweep them. Since this new system weak may not be marked, the GC may
230   // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
231   // reference to a string that is about to be swept.
232   Runtime::Current()->DisallowNewSystemWeaks();
233   return true;
234 }
235 
236 bool MarkSweep::IsConcurrent() const {
237   return is_concurrent_;
238 }
239 
240 void MarkSweep::MarkingPhase() {
241   base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
242   Thread* self = Thread::Current();
243 
244   BindBitmaps();
245   FindDefaultMarkBitmap();
246 
247   // Process dirty cards and add dirty cards to mod union tables.
248   heap_->ProcessCards(timings_);
249 
250   // Need to do this before the checkpoint since we don't want any threads to add references to
251   // the live stack during the recursive mark.
252   timings_.NewSplit("SwapStacks");
253   heap_->SwapStacks();
254 
255   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
256   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
257     // If we exclusively hold the mutator lock, all threads must be suspended.
258     MarkRoots();
259   } else {
260     MarkThreadRoots(self);
261     // At this point the live stack should no longer have any mutators which push into it.
262     MarkNonThreadRoots();
263   }
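  // Record the live stack size now; HandleDirtyObjectsPhase later checks that nothing was pushed
  // onto the live stack after the stacks were swapped.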
264   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
265   MarkConcurrentRoots();
266 
267   heap_->UpdateAndMarkModUnion(this, timings_, GetGcType());
268   MarkReachableObjects();
269 }
270 
271 void MarkSweep::MarkThreadRoots(Thread* self) {
272   MarkRootsCheckpoint(self);
273 }
274 
275 void MarkSweep::MarkReachableObjects() {
276   // Mark everything allocated since the last GC as live so that we can sweep concurrently,
277   // knowing that new allocations won't be marked as live.
278   timings_.StartSplit("MarkStackAsLive");
279   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
280   heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
281                         heap_->large_object_space_->GetLiveObjects(), live_stack);
282   live_stack->Reset();
283   timings_.EndSplit();
284   // Recursively mark all the non-image bits set in the mark bitmap.
285   RecursiveMark();
286 }
287 
288 void MarkSweep::ReclaimPhase() {
289   base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
290   Thread* self = Thread::Current();
291 
292   if (!IsConcurrent()) {
293     ProcessReferences(self);
294   }
295 
296   {
297     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
298     SweepSystemWeaks();
299   }
300 
301   if (IsConcurrent()) {
302     Runtime::Current()->AllowNewSystemWeaks();
303 
304     base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
305     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
306     accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
307     // The allocation stack contains things allocated since the start of the GC. These may have been
308     // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
309     // Remove these objects from the mark bitmaps so that they will be eligible for sticky
310     // collection.
311     // There is a race here which is safely handled. Another thread such as the hprof could
312     // have flushed the alloc stack after we resumed the threads. This is safe however, since
313     // resetting the allocation stack zeros it out with madvise. This means that we will either
314     // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
315     // first place.
316     mirror::Object** end = allocation_stack->End();
317     for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
318       const Object* obj = *it;
319       if (obj != NULL) {
320         UnMarkObjectNonNull(obj);
321       }
322     }
323   }
324 
325   // Before freeing anything, let's verify the heap.
326   if (kIsDebugBuild) {
327     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
328     VerifyImageRoots();
329   }
330 
331   {
332     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
333 
334     // Reclaim unmarked objects.
335     Sweep(false);
336 
337     // Swap the live and mark bitmaps for each space which we modified. This is an
338     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
339     // bitmaps.
340     timings_.StartSplit("SwapBitmaps");
341     SwapBitmaps();
342     timings_.EndSplit();
343 
344     // Unbind the live and mark bitmaps.
345     UnBindBitmaps();
346   }
347 }
348 
349 void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
350   immune_begin_ = begin;
351   immune_end_ = end;
352 }
353 
354 void MarkSweep::FindDefaultMarkBitmap() {
355   base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
356   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
357     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
358       current_mark_bitmap_ = space->GetMarkBitmap();
359       CHECK(current_mark_bitmap_ != NULL);
360       return;
361     }
362   }
363   GetHeap()->DumpSpaces();
364   LOG(FATAL) << "Could not find a default mark bitmap";
365 }
366 
367 void MarkSweep::ExpandMarkStack() {
368   ResizeMarkStack(mark_stack_->Capacity() * 2);
369 }
370 
371 void MarkSweep::ResizeMarkStack(size_t new_size) {
372   // Rare case, no need to have Thread::Current be a parameter.
373   if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
374     // Someone else acquired the lock and expanded the mark stack before us.
375     return;
376   }
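  // Copy the current contents aside, grow the stack, then push the saved entries back.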
377   std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
378   CHECK_LE(mark_stack_->Size(), new_size);
379   mark_stack_->Resize(new_size);
380   for (const auto& obj : temp) {
381     mark_stack_->PushBack(obj);
382   }
383 }
384 
385 inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
386   DCHECK(obj != NULL);
387   if (MarkObjectParallel(obj)) {
388     MutexLock mu(Thread::Current(), mark_stack_lock_);
389     if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
390       ExpandMarkStack();
391     }
392     // The object must be pushed on to the mark stack.
393     mark_stack_->PushBack(const_cast<Object*>(obj));
394   }
395 }
396 
397 inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
398   DCHECK(!IsImmune(obj));
399   // Try to take advantage of locality of references within a space; failing this, find the space
400   // the hard way.
401   accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
402   if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
403     accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
404     if (LIKELY(new_bitmap != NULL)) {
405       object_bitmap = new_bitmap;
406     } else {
407       MarkLargeObject(obj, false);
408       return;
409     }
410   }
411 
412   DCHECK(object_bitmap->HasAddress(obj));
413   object_bitmap->Clear(obj);
414 }
415 
416 inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
417   DCHECK(obj != NULL);
418 
419   if (IsImmune(obj)) {
420     DCHECK(IsMarked(obj));
421     return;
422   }
423 
424   // Try to take advantage of locality of references within a space; failing this, find the space
425   // the hard way.
426   accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
427   if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
428     accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
429     if (LIKELY(new_bitmap != NULL)) {
430       object_bitmap = new_bitmap;
431     } else {
432       MarkLargeObject(obj, true);
433       return;
434     }
435   }
436 
437   // This object was not previously marked.
438   if (!object_bitmap->Test(obj)) {
439     object_bitmap->Set(obj);
440     if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
441       // Lock is not needed but is here anyway to please annotalysis.
442       MutexLock mu(Thread::Current(), mark_stack_lock_);
443       ExpandMarkStack();
444     }
445     // The object must be pushed on to the mark stack.
446     mark_stack_->PushBack(const_cast<Object*>(obj));
447   }
448 }
449 
450 // Rare case, probably not worth inlining since it will increase instruction cache miss rate.
451 bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
452   // TODO: support >1 discontinuous space.
453   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
454   accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
455   if (kProfileLargeObjects) {
456     ++large_object_test_;
457   }
458   if (UNLIKELY(!large_objects->Test(obj))) {
459     if (!large_object_space->Contains(obj)) {
460       LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
461     LOG(ERROR) << "Attempting to see if it's a bad root";
462       VerifyRoots();
463       LOG(FATAL) << "Can't mark bad root";
464     }
465     if (kProfileLargeObjects) {
466       ++large_object_mark_;
467     }
468     if (set) {
469       large_objects->Set(obj);
470     } else {
471       large_objects->Clear(obj);
472     }
473     return true;
474   }
475   return false;
476 }
477 
478 inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
479   DCHECK(obj != NULL);
480 
481   if (IsImmune(obj)) {
482     DCHECK(IsMarked(obj));
483     return false;
484   }
485 
486   // Try to take advantage of locality of references within a space; failing this, find the space
487   // the hard way.
488   accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
489   if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
490     accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
491     if (new_bitmap != NULL) {
492       object_bitmap = new_bitmap;
493     } else {
494       // TODO: Remove the Thread::Current here?
495       // TODO: Convert this to some kind of atomic marking?
496       MutexLock mu(Thread::Current(), large_object_lock_);
497       return MarkLargeObject(obj, true);
498     }
499   }
500 
501   // Return true if the object was not previously marked.
502   return !object_bitmap->AtomicTestAndSet(obj);
503 }
504 
505 // Used to mark objects when recursing.  Recursion is done by moving
506 // the finger across the bitmaps in address order and marking child
507 // objects.  Any newly-marked objects whose addresses are lower than
508 // the finger won't be visited by the bitmap scan, so those objects
509 // need to be added to the mark stack.
510 inline void MarkSweep::MarkObject(const Object* obj) {
511   if (obj != NULL) {
512     MarkObjectNonNull(obj);
513   }
514 }
515 
516 void MarkSweep::MarkRoot(const Object* obj) {
517   if (obj != NULL) {
518     MarkObjectNonNull(obj);
519   }
520 }
521 
522 void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
523   DCHECK(root != NULL);
524   DCHECK(arg != NULL);
525   reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
526 }
527 
528 void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
529   DCHECK(root != NULL);
530   DCHECK(arg != NULL);
531   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
532   mark_sweep->MarkObjectNonNull(root);
533 }
534 
535 void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
536   DCHECK(root != NULL);
537   DCHECK(arg != NULL);
538   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
539   mark_sweep->MarkObjectNonNull(root);
540 }
541 
542 void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
543                                    const StackVisitor* visitor) {
544   reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
545 }
546 
547 void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
548   // See if the root is on any space bitmap.
549   if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
550     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
551     if (!large_object_space->Contains(root)) {
552       LOG(ERROR) << "Found invalid root: " << root;
553       if (visitor != NULL) {
554         LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
555       }
556     }
557   }
558 }
559 
560 void MarkSweep::VerifyRoots() {
561   Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
562 }
563 
564 // Marks all objects in the root set.
565 void MarkSweep::MarkRoots() {
566   timings_.StartSplit("MarkRoots");
567   Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
568   timings_.EndSplit();
569 }
570 
571 void MarkSweep::MarkNonThreadRoots() {
572   timings_.StartSplit("MarkNonThreadRoots");
573   Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
574   timings_.EndSplit();
575 }
576 
577 void MarkSweep::MarkConcurrentRoots() {
578   timings_.StartSplit("MarkConcurrentRoots");
579   // Visit all runtime roots and clear dirty flags.
580   Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
581   timings_.EndSplit();
582 }
583 
584 void MarkSweep::CheckObject(const Object* obj) {
585   DCHECK(obj != NULL);
586   VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
587       bool is_static) NO_THREAD_SAFETY_ANALYSIS {
588     Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
589     CheckReference(obj, ref, offset, is_static);
590   });
591 }
592 
593 void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
594   DCHECK(root != NULL);
595   DCHECK(arg != NULL);
596   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
597   DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
598   mark_sweep->CheckObject(root);
599 }
600 
601 void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
602   CHECK(space->IsDlMallocSpace());
603   space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
604   accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
605   accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
606   GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
607   alloc_space->temp_bitmap_.reset(mark_bitmap);
608   alloc_space->mark_bitmap_.reset(live_bitmap);
609 }
610 
611 class ScanObjectVisitor {
612  public:
613   explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
614       : mark_sweep_(mark_sweep) {}
615 
616   // TODO: Fixme when annotalysis works with visitors.
617   void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
618     if (kCheckLocks) {
619       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
620       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
621     }
622     mark_sweep_->ScanObject(obj);
623   }
624 
625  private:
626   MarkSweep* const mark_sweep_;
627 };
628 
629 template <bool kUseFinger = false>
630 class MarkStackTask : public Task {
631  public:
632   MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
633                 const Object** mark_stack)
634       : mark_sweep_(mark_sweep),
635         thread_pool_(thread_pool),
636         mark_stack_pos_(mark_stack_size) {
637     // We may have to copy part of an existing mark stack when another mark stack overflows.
638     if (mark_stack_size != 0) {
639       DCHECK(mark_stack != NULL);
640       // TODO: Check performance?
641       std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
642     }
643     if (kCountTasks) {
644       ++mark_sweep_->work_chunks_created_;
645     }
646   }
647 
648   static const size_t kMaxSize = 1 * KB;
649 
650  protected:
651   class ScanObjectParallelVisitor {
652    public:
653     explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
654         : chunk_task_(chunk_task) {}
655 
656     void operator()(const Object* obj) const {
657       MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
658       mark_sweep->ScanObjectVisit(obj,
659           [mark_sweep, this](const Object* /* obj */, const Object* ref,
660               const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
661         if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
662           if (kUseFinger) {
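            // As with the finger-based marking described above, references at or above the finger
            // will still be visited by the in-progress bitmap scan, so only lower addresses need
            // to be pushed onto the mark stack.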
663             android_memory_barrier();
664             if (reinterpret_cast<uintptr_t>(ref) >=
665                 static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
666               return;
667             }
668           }
669           chunk_task_->MarkStackPush(ref);
670         }
671       });
672     }
673 
674    private:
675     MarkStackTask<kUseFinger>* const chunk_task_;
676   };
677 
678   virtual ~MarkStackTask() {
679     // Make sure that we have cleared our mark stack.
680     DCHECK_EQ(mark_stack_pos_, 0U);
681     if (kCountTasks) {
682       ++mark_sweep_->work_chunks_deleted_;
683     }
684   }
685 
686   MarkSweep* const mark_sweep_;
687   ThreadPool* const thread_pool_;
688   // Thread local mark stack for this task.
689   const Object* mark_stack_[kMaxSize];
690   // Mark stack position.
691   size_t mark_stack_pos_;
692 
693   void MarkStackPush(const Object* obj) ALWAYS_INLINE {
694     if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
695       // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
696       mark_stack_pos_ /= 2;
697       auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
698                                      mark_stack_ + mark_stack_pos_);
699       thread_pool_->AddTask(Thread::Current(), task);
700     }
701     DCHECK(obj != nullptr);
702     DCHECK(mark_stack_pos_ < kMaxSize);
703     mark_stack_[mark_stack_pos_++] = obj;
704   }
705 
706   virtual void Finalize() {
707     delete this;
708   }
709 
710   // Scans all of the objects
711   virtual void Run(Thread* self) {
712     ScanObjectParallelVisitor visitor(this);
713     // TODO: Tune this.
714     static const size_t kFifoSize = 4;
715     BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
716     for (;;) {
717       const Object* obj = NULL;
718       if (kUseMarkStackPrefetch) {
719         while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
720           const Object* obj = mark_stack_[--mark_stack_pos_];
721           DCHECK(obj != NULL);
722           __builtin_prefetch(obj);
723           prefetch_fifo.push_back(obj);
724         }
725         if (UNLIKELY(prefetch_fifo.empty())) {
726           break;
727         }
728         obj = prefetch_fifo.front();
729         prefetch_fifo.pop_front();
730       } else {
731         if (UNLIKELY(mark_stack_pos_ == 0)) {
732           break;
733         }
734         obj = mark_stack_[--mark_stack_pos_];
735       }
736       DCHECK(obj != NULL);
737       visitor(obj);
738     }
739   }
740 };
741 
742 class CardScanTask : public MarkStackTask<false> {
743  public:
744   CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
745                byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
746                const Object** mark_stack_obj)
747       : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
748         bitmap_(bitmap),
749         begin_(begin),
750         end_(end),
751         minimum_age_(minimum_age) {
752   }
753 
754  protected:
755   accounting::SpaceBitmap* const bitmap_;
756   byte* const begin_;
757   byte* const end_;
758   const byte minimum_age_;
759 
760   virtual void Finalize() {
761     delete this;
762   }
763 
764   virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
765     ScanObjectParallelVisitor visitor(this);
766     accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
767     size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
768     mark_sweep_->cards_scanned_.fetch_add(cards_scanned);
769     VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
770         << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
771     // Finish by emptying our local mark stack.
772     MarkStackTask::Run(self);
773   }
774 };
775 
776 size_t MarkSweep::GetThreadCount(bool paused) const {
777   if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
778     return 0;
779   }
780   if (paused) {
781     return heap_->GetParallelGCThreadCount() + 1;
782   } else {
783     return heap_->GetConcGCThreadCount() + 1;
784   }
785 }
786 
787 void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
788   accounting::CardTable* card_table = GetHeap()->GetCardTable();
789   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
790   size_t thread_count = GetThreadCount(paused);
791   // The parallel version with only one thread is faster for card scanning. TODO: fix.
792   if (kParallelCardScan && thread_count > 0) {
793     Thread* self = Thread::Current();
794     // Can't have a different split for each space since multiple spaces can have their cards being
795     // scanned at the same time.
796     timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
797     // Try to take some of the mark stack since we can pass this off to the worker tasks.
798     const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
799     const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
800     const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
801     // Estimated number of work tasks we will create.
802     const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
803     DCHECK_NE(mark_stack_tasks, 0U);
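    // Split the current mark stack contents roughly evenly across the expected tasks, capping each
    // share at half of a task's local mark stack capacity.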
804     const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
805                                              mark_stack_size / mark_stack_tasks + 1);
806     size_t ref_card_count = 0;
807     cards_scanned_ = 0;
808     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
809       byte* card_begin = space->Begin();
810       byte* card_end = space->End();
811       // Calculate how many bytes of heap we will scan.
812       const size_t address_range = card_end - card_begin;
813       // Calculate how much address range each task gets.
814       const size_t card_delta = RoundUp(address_range / thread_count + 1,
815                                         accounting::CardTable::kCardSize);
816       // Create the worker tasks for this space.
817       while (card_begin != card_end) {
818         // Add a range of cards.
819         size_t addr_remaining = card_end - card_begin;
820         size_t card_increment = std::min(card_delta, addr_remaining);
821         // Take from the back of the mark stack.
822         size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
823         size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
824         mark_stack_end -= mark_stack_increment;
825         mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
826         DCHECK_EQ(mark_stack_end, mark_stack_->End());
827         // Add the new task to the thread pool.
828         auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
829                                       card_begin + card_increment, minimum_age,
830                                       mark_stack_increment, mark_stack_end);
831         thread_pool->AddTask(self, task);
832         card_begin += card_increment;
833       }
834 
835       if (paused && kIsDebugBuild) {
836         // Make sure we don't miss scanning any cards.
837         size_t scanned_cards = card_table->Scan(space->GetMarkBitmap(), space->Begin(),
838                                                 space->End(), VoidFunctor(), minimum_age);
839         VLOG(heap) << "Scanning space cards " << reinterpret_cast<void*>(space->Begin()) << " - "
840             << reinterpret_cast<void*>(space->End()) << " = " << scanned_cards;
841         ref_card_count += scanned_cards;
842       }
843     }
844 
845     thread_pool->SetMaxActiveWorkers(thread_count - 1);
846     thread_pool->StartWorkers(self);
847     thread_pool->Wait(self, true, true);
848     thread_pool->StopWorkers(self);
849     if (paused) {
850       DCHECK_EQ(ref_card_count, static_cast<size_t>(cards_scanned_.load()));
851     }
852     timings_.EndSplit();
853   } else {
854     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
855       // Image spaces are handled properly since live == marked for them.
856       switch (space->GetGcRetentionPolicy()) {
857         case space::kGcRetentionPolicyNeverCollect:
858           timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
859               "ScanGrayImageSpaceObjects");
860           break;
861         case space::kGcRetentionPolicyFullCollect:
862           timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
863               "ScanGrayZygoteSpaceObjects");
864           break;
865         case space::kGcRetentionPolicyAlwaysCollect:
866           timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
867               "ScanGrayAllocSpaceObjects");
868           break;
869         }
870       ScanObjectVisitor visitor(this);
871       card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
872       timings_.EndSplit();
873     }
874   }
875 }
876 
877 void MarkSweep::VerifyImageRoots() {
878   // Verify roots ensures that all the references inside the image space point to
879   // objects which are either in the image space or marked objects in the alloc
880   // space.
881   timings_.StartSplit("VerifyImageRoots");
882   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
883     if (space->IsImageSpace()) {
884       space::ImageSpace* image_space = space->AsImageSpace();
885       uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
886       uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
887       accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
888       DCHECK(live_bitmap != NULL);
889       live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) {
890         if (kCheckLocks) {
891           Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
892         }
893         DCHECK(obj != NULL);
894         CheckObject(obj);
895       });
896     }
897   }
898   timings_.EndSplit();
899 }
900 
901 class RecursiveMarkTask : public MarkStackTask<false> {
902  public:
903   RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
904                     accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
905       : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
906         bitmap_(bitmap),
907         begin_(begin),
908         end_(end) {
909   }
910 
911  protected:
912   accounting::SpaceBitmap* const bitmap_;
913   const uintptr_t begin_;
914   const uintptr_t end_;
915 
916   virtual void Finalize() {
917     delete this;
918   }
919 
920   // Scans all of the objects
921   virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
922     ScanObjectParallelVisitor visitor(this);
923     bitmap_->VisitMarkedRange(begin_, end_, visitor);
924     // Finish by emptying our local mark stack.
925     MarkStackTask::Run(self);
926   }
927 };
928 
929 // Populates the mark stack based on the set of marked objects and
930 // recursively marks until the mark stack is emptied.
931 void MarkSweep::RecursiveMark() {
932   base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
933   // RecursiveMark will build the lists of known instances of the Reference classes.
934   // See DelayReferenceReferent for details.
935   CHECK(soft_reference_list_ == NULL);
936   CHECK(weak_reference_list_ == NULL);
937   CHECK(finalizer_reference_list_ == NULL);
938   CHECK(phantom_reference_list_ == NULL);
939   CHECK(cleared_reference_list_ == NULL);
940 
941   if (kUseRecursiveMark) {
942     const bool partial = GetGcType() == kGcTypePartial;
943     ScanObjectVisitor scan_visitor(this);
944     auto* self = Thread::Current();
945     ThreadPool* thread_pool = heap_->GetThreadPool();
946     size_t thread_count = GetThreadCount(false);
947     const bool parallel = kParallelRecursiveMark && thread_count > 1;
948     mark_stack_->Reset();
949     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
950       if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
951           (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
952         current_mark_bitmap_ = space->GetMarkBitmap();
953         if (current_mark_bitmap_ == NULL) {
954           GetHeap()->DumpSpaces();
955           LOG(FATAL) << "invalid bitmap";
956         }
957         if (parallel) {
958           // We will use the mark stack in the future.
959           // CHECK(mark_stack_->IsEmpty());
960           // This function does not handle heap end increasing, so we must use the space end.
961           uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
962           uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
963           atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);
964 
965           // Create a few worker tasks.
966           const size_t n = thread_count * 2;
967           while (begin != end) {
968             uintptr_t start = begin;
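            // Each task gets 1/(2 * thread_count) of the remaining range, rounded up to a whole
            // KB; if that would be under 16 KB, the task takes the entire remainder instead.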
969             uintptr_t delta = (end - begin) / n;
970             delta = RoundUp(delta, KB);
971             if (delta < 16 * KB) delta = end - begin;
972             begin += delta;
973             auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
974                                                begin);
975             thread_pool->AddTask(self, task);
976           }
977           thread_pool->SetMaxActiveWorkers(thread_count - 1);
978           thread_pool->StartWorkers(self);
979           thread_pool->Wait(self, true, true);
980           thread_pool->StopWorkers(self);
981         } else {
982           // This function does not handle heap end increasing, so we must use the space end.
983           uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
984           uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
985           current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
986         }
987       }
988     }
989   }
990   ProcessMarkStack(false);
991 }
992 
993 bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
994   return reinterpret_cast<MarkSweep*>(arg)->IsMarked(object);
995 }
996 
997 void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
998   ScanGrayObjects(paused, minimum_age);
999   ProcessMarkStack(paused);
1000 }
1001 
1002 void MarkSweep::ReMarkRoots() {
1003   timings_.StartSplit("ReMarkRoots");
1004   Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
1005   timings_.EndSplit();
1006 }
1007 
1008 void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
1009   Runtime::Current()->GetJavaVM()->SweepWeakGlobals(is_marked, arg);
1010 }
1011 
1012 struct ArrayMarkedCheck {
1013   accounting::ObjectStack* live_stack;
1014   MarkSweep* mark_sweep;
1015 };
1016 
1017 // Either marked or not live.
1018 bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
1019   ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
1020   if (array_check->mark_sweep->IsMarked(object)) {
1021     return true;
1022   }
1023   accounting::ObjectStack* live_stack = array_check->live_stack;
1024   if (std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End()) {
1025     return true;
1026   }
1027   return false;
1028 }
1029 
1030 void MarkSweep::SweepSystemWeaks() {
1031   Runtime* runtime = Runtime::Current();
1032   timings_.StartSplit("SweepSystemWeaks");
1033   runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
1034   runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
1035   SweepJniWeakGlobals(IsMarkedCallback, this);
1036   timings_.EndSplit();
1037 }
1038 
1039 bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
1040   reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
1041   // We don't actually want to sweep the object, so let's return "marked".
1042   return true;
1043 }
1044 
1045 void MarkSweep::VerifyIsLive(const Object* obj) {
1046   Heap* heap = GetHeap();
1047   if (!heap->GetLiveBitmap()->Test(obj)) {
1048     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1049     if (!large_object_space->GetLiveObjects()->Test(obj)) {
1050       if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
1051           heap->allocation_stack_->End()) {
1052         // Object not found!
1053         heap->DumpSpaces();
1054         LOG(FATAL) << "Found dead object " << obj;
1055       }
1056     }
1057   }
1058 }
1059 
1060 void MarkSweep::VerifySystemWeaks() {
1061   Runtime* runtime = Runtime::Current();
1062   // Verify system weaks, uses a special IsMarked callback which always returns true.
1063   runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
1064   runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
1065   runtime->GetJavaVM()->SweepWeakGlobals(VerifyIsLiveCallback, this);
1066 }
1067 
1068 struct SweepCallbackContext {
1069   MarkSweep* mark_sweep;
1070   space::AllocSpace* space;
1071   Thread* self;
1072 };
1073 
1074 class CheckpointMarkThreadRoots : public Closure {
1075  public:
1076   explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
1077 
1078   virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
1079     ATRACE_BEGIN("Marking thread roots");
1080     // Note: self is not necessarily equal to thread since thread may be suspended.
1081     Thread* self = Thread::Current();
1082     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1083         << thread->GetState() << " thread " << thread << " self " << self;
1084     thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
1085     ATRACE_END();
1086     mark_sweep_->GetBarrier().Pass(self);
1087   }
1088 
1089  private:
1090   MarkSweep* mark_sweep_;
1091 };
1092 
1093 void MarkSweep::MarkRootsCheckpoint(Thread* self) {
1094   CheckpointMarkThreadRoots check_point(this);
1095   timings_.StartSplit("MarkRootsCheckpoint");
1096   ThreadList* thread_list = Runtime::Current()->GetThreadList();
1097   // Request that the checkpoint be run on all threads, returning a count of the threads that
1098   // must run through the barrier, including self.
1099   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1100   // Release locks then wait for all mutator threads to pass the barrier.
1101   // TODO: optimize to not release locks when there are no threads to wait for.
1102   Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1103   Locks::mutator_lock_->SharedUnlock(self);
1104   ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
1105   CHECK_EQ(old_state, kWaitingPerformingGc);
1106   gc_barrier_->Increment(self, barrier_count);
1107   self->SetState(kWaitingPerformingGc);
1108   Locks::mutator_lock_->SharedLock(self);
1109   Locks::heap_bitmap_lock_->ExclusiveLock(self);
1110   timings_.EndSplit();
1111 }
1112 
1113 void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
1114   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
1115   MarkSweep* mark_sweep = context->mark_sweep;
1116   Heap* heap = mark_sweep->GetHeap();
1117   space::AllocSpace* space = context->space;
1118   Thread* self = context->self;
1119   Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
1120   // Use a bulk free that merges consecutive objects before freeing, or free per object?
1121   // Documentation suggests better free performance with merging, but this may come at the expense
1122   // of allocation.
1123   size_t freed_objects = num_ptrs;
1124   // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
1125   size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
1126   heap->RecordFree(freed_objects, freed_bytes);
1127   mark_sweep->freed_objects_.fetch_add(freed_objects);
1128   mark_sweep->freed_bytes_.fetch_add(freed_bytes);
1129 }
1130 
1131 void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
1132   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
1133   Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
1134   Heap* heap = context->mark_sweep->GetHeap();
1135   // We don't free any actual memory to avoid dirtying the shared zygote pages.
1136   for (size_t i = 0; i < num_ptrs; ++i) {
1137     Object* obj = static_cast<Object*>(ptrs[i]);
1138     heap->GetLiveBitmap()->Clear(obj);
1139     heap->GetCardTable()->MarkCard(obj);
1140   }
1141 }
1142 
1143 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1144   space::DlMallocSpace* space = heap_->GetAllocSpace();
1145   timings_.StartSplit("SweepArray");
1146   // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
1147   // going to free.
1148   accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
1149   accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1150   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1151   accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
1152   accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
1153   if (swap_bitmaps) {
1154     std::swap(live_bitmap, mark_bitmap);
1155     std::swap(large_live_objects, large_mark_objects);
1156   }
1157 
1158   size_t freed_bytes = 0;
1159   size_t freed_large_object_bytes = 0;
1160   size_t freed_objects = 0;
1161   size_t freed_large_objects = 0;
1162   size_t count = allocations->Size();
1163   Object** objects = const_cast<Object**>(allocations->Begin());
1164   Object** out = objects;
1165   Object** objects_to_chunk_free = out;
1166 
1167   // Empty the allocation stack.
1168   Thread* self = Thread::Current();
1169   for (size_t i = 0; i < count; ++i) {
1170     Object* obj = objects[i];
1171     // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
1172     if (LIKELY(mark_bitmap->HasAddress(obj))) {
1173       if (!mark_bitmap->Test(obj)) {
1174         // Don't bother un-marking since we clear the mark bitmap anyway.
1175         *(out++) = obj;
1176         // Free objects in chunks.
1177         DCHECK_GE(out, objects_to_chunk_free);
1178         DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
1179         if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
1180           timings_.StartSplit("FreeList");
1181           size_t chunk_freed_objects = out - objects_to_chunk_free;
1182           freed_objects += chunk_freed_objects;
1183           freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
1184           objects_to_chunk_free = out;
1185           timings_.EndSplit();
1186         }
1187       }
1188     } else if (!large_mark_objects->Test(obj)) {
1189       ++freed_large_objects;
1190       freed_large_object_bytes += large_object_space->Free(self, obj);
1191     }
1192   }
1193   // Free the remaining objects in chunks.
1194   DCHECK_GE(out, objects_to_chunk_free);
1195   DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
1196   if (out - objects_to_chunk_free > 0) {
1197     timings_.StartSplit("FreeList");
1198     size_t chunk_freed_objects = out - objects_to_chunk_free;
1199     freed_objects += chunk_freed_objects;
1200     freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
1201     timings_.EndSplit();
1202   }
1203   CHECK_EQ(count, allocations->Size());
1204   timings_.EndSplit();
1205 
1206   timings_.StartSplit("RecordFree");
1207   VLOG(heap) << "Freed " << freed_objects << "/" << count
1208              << " objects with size " << PrettySize(freed_bytes);
1209   heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
1210   freed_objects_.fetch_add(freed_objects);
1211   freed_large_objects_.fetch_add(freed_large_objects);
1212   freed_bytes_.fetch_add(freed_bytes);
1213   freed_large_object_bytes_.fetch_add(freed_large_object_bytes);
1214   timings_.EndSplit();
1215 
1216   timings_.StartSplit("ResetStack");
1217   allocations->Reset();
1218   timings_.EndSplit();
1219 }
1220 
1221 void MarkSweep::Sweep(bool swap_bitmaps) {
1222   DCHECK(mark_stack_->IsEmpty());
1223   base::TimingLogger::ScopedSplit split("Sweep", &timings_);
1224 
1225   const bool partial = (GetGcType() == kGcTypePartial);
1226   SweepCallbackContext scc;
1227   scc.mark_sweep = this;
1228   scc.self = Thread::Current();
1229   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1230     // We always sweep always-collect spaces.
1231     bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
1232     if (!partial && !sweep_space) {
1233       // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC).
1234       sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
1235     }
1236     if (sweep_space) {
1237       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1238       uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
1239       scc.space = space->AsDlMallocSpace();
1240       accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
1241       accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1242       if (swap_bitmaps) {
1243         std::swap(live_bitmap, mark_bitmap);
1244       }
1245       if (!space->IsZygoteSpace()) {
1246         base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
1247         // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
1248         accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
1249                                            &SweepCallback, reinterpret_cast<void*>(&scc));
1250       } else {
1251         base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
1252         // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
1253         // memory.
1254         accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
1255                                            &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
1256       }
1257     }
1258   }
1259 
1260   SweepLargeObjects(swap_bitmaps);
1261 }
1262 
1263 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
1264   base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
1265   // Sweep large objects
1266   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1267   accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
1268   accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
1269   if (swap_bitmaps) {
1270     std::swap(large_live_objects, large_mark_objects);
1271   }
1272   // O(n*log(n)) but hopefully there are not too many large objects.
1273   size_t freed_objects = 0;
1274   size_t freed_bytes = 0;
1275   Thread* self = Thread::Current();
1276   for (const Object* obj : large_live_objects->GetObjects()) {
1277     if (!large_mark_objects->Test(obj)) {
1278       freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
1279       ++freed_objects;
1280     }
1281   }
1282   freed_large_objects_.fetch_add(freed_objects);
1283   freed_large_object_bytes_.fetch_add(freed_bytes);
1284   GetHeap()->RecordFree(freed_objects, freed_bytes);
1285 }
1286 
1287 void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
1288   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1289     if (space->IsDlMallocSpace() && space->Contains(ref)) {
1290       DCHECK(IsMarked(obj));
1291 
1292       bool is_marked = IsMarked(ref);
1293       if (!is_marked) {
1294         LOG(INFO) << *space;
1295         LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
1296                      << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
1297                      << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
1298                      << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
1299 
1300         const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1301         DCHECK(klass != NULL);
1302         const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
1303         DCHECK(fields != NULL);
1304         bool found = false;
1305         for (int32_t i = 0; i < fields->GetLength(); ++i) {
1306           const ArtField* cur = fields->Get(i);
1307           if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1308             LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
1309             found = true;
1310             break;
1311           }
1312         }
1313         if (!found) {
1314           LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
1315         }
1316 
1317         bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
1318         if (!obj_marked) {
1319           LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
1320                        << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
1321                        << "the alloc space, but wasn't card marked";
1322         }
1323       }
1324     }
1325     break;
1326   }
1327 }
1328 
1329 // Process the "referent" field in a java.lang.ref.Reference.  If the
1330 // referent has not yet been marked, put it on the appropriate list in
1331 // the heap for later processing.
1332 void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
1333   DCHECK(klass != nullptr);
1334   DCHECK(klass->IsReferenceClass());
1335   DCHECK(obj != NULL);
1336   Object* referent = heap_->GetReferenceReferent(obj);
1337   if (referent != NULL && !IsMarked(referent)) {
1338     if (kCountJavaLangRefs) {
1339       ++reference_count_;
1340     }
1341     Thread* self = Thread::Current();
1342     // TODO: Remove these locks, and use atomic stacks for storing references?
1343     // We need to check that the references haven't already been enqueued since we can end up
1344     // scanning the same reference multiple times due to dirty cards.
1345     if (klass->IsSoftReferenceClass()) {
1346       MutexLock mu(self, *heap_->GetSoftRefQueueLock());
1347       if (!heap_->IsEnqueued(obj)) {
1348         heap_->EnqueuePendingReference(obj, &soft_reference_list_);
1349       }
1350     } else if (klass->IsWeakReferenceClass()) {
1351       MutexLock mu(self, *heap_->GetWeakRefQueueLock());
1352       if (!heap_->IsEnqueued(obj)) {
1353         heap_->EnqueuePendingReference(obj, &weak_reference_list_);
1354       }
1355     } else if (klass->IsFinalizerReferenceClass()) {
1356       MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
1357       if (!heap_->IsEnqueued(obj)) {
1358         heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
1359       }
1360     } else if (klass->IsPhantomReferenceClass()) {
1361       MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
1362       if (!heap_->IsEnqueued(obj)) {
1363         heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
1364       }
1365     } else {
1366       LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
1367                  << " " << std::hex << klass->GetAccessFlags();
1368     }
1369   }
1370 }
1371 
1372 void MarkSweep::ScanRoot(const Object* obj) {
1373   ScanObject(obj);
1374 }
1375 
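// Visitor handed to ScanObjectVisit: it marks every reference field the scan
// encounters, which may push newly marked objects onto the mark stack.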
1376 class MarkObjectVisitor {
1377  public:
1378   explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}
1379 
1380   // TODO: Fixme when annotalysis works with visitors.
1381   void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
1382                   bool /* is_static */) const ALWAYS_INLINE
1383       NO_THREAD_SAFETY_ANALYSIS {
1384     if (kCheckLocks) {
1385       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1386       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1387     }
1388     mark_sweep_->MarkObject(ref);
1389   }
1390 
1391  private:
1392   MarkSweep* const mark_sweep_;
1393 };
1394 
1395 // Scans an object.  Determines the type of the object and dispatches to a
1396 // specialized scanning routine.
1397 void MarkSweep::ScanObject(const Object* obj) {
1398   MarkObjectVisitor visitor(this);
1399   ScanObjectVisit(obj, visitor);
1400 }
1401 
1402 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
1403   Thread* self = Thread::Current();
1404   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1405   const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1406                                      static_cast<size_t>(MarkStackTask<false>::kMaxSize));
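  // chunk_size aims for roughly one chunk per thread while never exceeding the
  // per-task capacity MarkStackTask<false>::kMaxSize.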
1407   CHECK_GT(chunk_size, 0U);
1408   // Split the current mark stack up into work tasks.
1409   for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
1410     const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
1411     thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
1412                                                         const_cast<const mirror::Object**>(it)));
1413     it += delta;
1414   }
1415   thread_pool->SetMaxActiveWorkers(thread_count - 1);
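  // thread_count - 1 pool workers plus the calling thread, which also executes tasks
  // in Wait() below, gives thread_count threads processing the mark stack.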
1416   thread_pool->StartWorkers(self);
1417   thread_pool->Wait(self, true, true);
1418   thread_pool->StopWorkers(self);
1419   mark_stack_->Reset();
1420   CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
1421 }
1422 
1423 // Scan anything that's on the mark stack.
1424 void MarkSweep::ProcessMarkStack(bool paused) {
1425   timings_.StartSplit("ProcessMarkStack");
1426   size_t thread_count = GetThreadCount(paused);
1427   if (kParallelProcessMarkStack && thread_count > 1 &&
1428       mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1429     ProcessMarkStackParallel(thread_count);
1430   } else {
1431     // TODO: Tune this.
1432     static const size_t kFifoSize = 4;
1433     BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
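    // Keep a small FIFO of objects popped ahead of time and prefetched, so an object's
    // memory is likely already in cache by the time ScanObject dereferences it.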
1434     for (;;) {
1435       const Object* obj = NULL;
1436       if (kUseMarkStackPrefetch) {
1437         while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
1438           const Object* obj = mark_stack_->PopBack();
1439           DCHECK(obj != NULL);
1440           __builtin_prefetch(obj);
1441           prefetch_fifo.push_back(obj);
1442         }
1443         if (prefetch_fifo.empty()) {
1444           break;
1445         }
1446         obj = prefetch_fifo.front();
1447         prefetch_fifo.pop_front();
1448       } else {
1449         if (mark_stack_->IsEmpty()) {
1450           break;
1451         }
1452         obj = mark_stack_->PopBack();
1453       }
1454       DCHECK(obj != NULL);
1455       ScanObject(obj);
1456     }
1457   }
1458   timings_.EndSplit();
1459 }
1460 
1461 // Walks the reference list marking any references subject to the
1462 // reference clearing policy.  References with a black referent are
1463 // removed from the list.  References with white referents biased
1464 // toward saving are blackened and also removed from the list.
1465 void MarkSweep::PreserveSomeSoftReferences(Object** list) {
1466   DCHECK(list != NULL);
1467   Object* clear = NULL;
1468   size_t counter = 0;
1469 
1470   DCHECK(mark_stack_->IsEmpty());
1471 
1472   timings_.StartSplit("PreserveSomeSoftReferences");
1473   while (*list != NULL) {
1474     Object* ref = heap_->DequeuePendingReference(list);
1475     Object* referent = heap_->GetReferenceReferent(ref);
1476     if (referent == NULL) {
1477       // Referent was cleared by the user during marking.
1478       continue;
1479     }
1480     bool is_marked = IsMarked(referent);
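    // The counter's parity preserves roughly every other unmarked referent, so about
    // half of the soft references are kept alive rather than cleared.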
1481     if (!is_marked && ((++counter) & 1)) {
1482       // Referent is white and biased toward saving, mark it.
1483       MarkObject(referent);
1484       is_marked = true;
1485     }
1486     if (!is_marked) {
1487       // Referent is white, queue it for clearing.
1488       heap_->EnqueuePendingReference(ref, &clear);
1489     }
1490   }
1491   *list = clear;
1492   timings_.EndSplit();
1493 
1494   // Restart the mark with the newly black references added to the root set.
1495   ProcessMarkStack(true);
1496 }
1497 
1498 inline bool MarkSweep::IsMarked(const Object* object) const
1499     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
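  // Objects in the immune region (spaces not collected by this GC) are treated as
  // always marked; everything else is looked up in the relevant mark bitmap.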
1500   if (IsImmune(object)) {
1501     return true;
1502   }
1503   DCHECK(current_mark_bitmap_ != NULL);
1504   if (current_mark_bitmap_->HasAddress(object)) {
1505     return current_mark_bitmap_->Test(object);
1506   }
1507   return heap_->GetMarkBitmap()->Test(object);
1508 }
1509 
1510 // Unlinks the reference list, clearing references whose referents are white.
1511 // Cleared references registered to a reference queue are scheduled for
1512 // appending by the heap worker thread.
1513 void MarkSweep::ClearWhiteReferences(Object** list) {
1514   DCHECK(list != NULL);
1515   while (*list != NULL) {
1516     Object* ref = heap_->DequeuePendingReference(list);
1517     Object* referent = heap_->GetReferenceReferent(ref);
1518     if (referent != NULL && !IsMarked(referent)) {
1519       // Referent is white, clear it.
1520       heap_->ClearReferenceReferent(ref);
1521       if (heap_->IsEnqueuable(ref)) {
1522         heap_->EnqueueReference(ref, &cleared_reference_list_);
1523       }
1524     }
1525   }
1526   DCHECK(*list == NULL);
1527 }
1528 
1529 // Enqueues finalizer references with white referents.  White
1530 // referents are blackened, moved to the zombie field, and the
1531 // referent field is cleared.
1532 void MarkSweep::EnqueueFinalizerReferences(Object** list) {
1533   DCHECK(list != NULL);
1534   timings_.StartSplit("EnqueueFinalizerReferences");
1535   MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
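  // The zombie field keeps the referent reachable so its finalizer can still run after
  // the referent field itself is cleared below.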
1536   bool has_enqueued = false;
1537   while (*list != NULL) {
1538     Object* ref = heap_->DequeuePendingReference(list);
1539     Object* referent = heap_->GetReferenceReferent(ref);
1540     if (referent != NULL && !IsMarked(referent)) {
1541       MarkObject(referent);
1542       // If the referent is non-null then the reference must be enqueuable.
1543       DCHECK(heap_->IsEnqueuable(ref));
1544       ref->SetFieldObject(zombie_offset, referent, false);
1545       heap_->ClearReferenceReferent(ref);
1546       heap_->EnqueueReference(ref, &cleared_reference_list_);
1547       has_enqueued = true;
1548     }
1549   }
1550   timings_.EndSplit();
1551   if (has_enqueued) {
1552     ProcessMarkStack(true);
1553   }
1554   DCHECK(*list == NULL);
1555 }
1556 
1557 // Process reference class instances and schedule finalizations.
1558 void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
1559                                   Object** weak_references,
1560                                   Object** finalizer_references,
1561                                   Object** phantom_references) {
1562   CHECK(soft_references != NULL);
1563   CHECK(weak_references != NULL);
1564   CHECK(finalizer_references != NULL);
1565   CHECK(phantom_references != NULL);
1566   CHECK(mark_stack_->IsEmpty());
1567 
1568   // Unless we are in the zygote or required to clear soft references with
1569   // white referents, preserve some white referents.
1570   if (!clear_soft && !Runtime::Current()->IsZygote()) {
1571     PreserveSomeSoftReferences(soft_references);
1572   }
1573 
1574   timings_.StartSplit("ProcessReferences");
1575   // Clear all remaining soft and weak references with white
1576   // referents.
1577   ClearWhiteReferences(soft_references);
1578   ClearWhiteReferences(weak_references);
1579   timings_.EndSplit();
1580 
1581   // Preserve all white objects with finalize methods and schedule
1582   // them for finalization.
1583   EnqueueFinalizerReferences(finalizer_references);
1584 
1585   timings_.StartSplit("ProcessReferences");
1586   // Clear all f-reachable soft and weak references with white
1587   // referents.
1588   ClearWhiteReferences(soft_references);
1589   ClearWhiteReferences(weak_references);
1590 
1591   // Clear all phantom references with white referents.
1592   ClearWhiteReferences(phantom_references);
1593 
1594   // At this point all reference lists should be empty.
1595   DCHECK(*soft_references == NULL);
1596   DCHECK(*weak_references == NULL);
1597   DCHECK(*finalizer_references == NULL);
1598   DCHECK(*phantom_references == NULL);
1599   timings_.EndSplit();
1600 }
1601 
1602 void MarkSweep::UnBindBitmaps() {
1603   base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
1604   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1605     if (space->IsDlMallocSpace()) {
1606       space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
1607       if (alloc_space->temp_bitmap_.get() != NULL) {
1608         // At this point, the temp_bitmap holds our old mark bitmap.
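        // Restore it: the mark bitmap pointer currently aliases the live bitmap (see
        // BindLiveToMarkBitmap), so swap the stashed bitmap back into mark_bitmap_.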
1609         accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
1610         GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
1611         CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
1612         alloc_space->mark_bitmap_.reset(new_bitmap);
1613         DCHECK(alloc_space->temp_bitmap_.get() == NULL);
1614       }
1615     }
1616   }
1617 }
1618 
1619 void MarkSweep::FinishPhase() {
1620   base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
1621   // Can't enqueue references if we hold the mutator lock.
1622   Object* cleared_references = GetClearedReferences();
1623   Heap* heap = GetHeap();
1624   timings_.NewSplit("EnqueueClearedReferences");
1625   heap->EnqueueClearedReferences(&cleared_references);
1626 
1627   timings_.NewSplit("PostGcVerification");
1628   heap->PostGcVerification(this);
1629 
1630   timings_.NewSplit("GrowForUtilization");
1631   heap->GrowForUtilization(GetGcType(), GetDurationNs());
1632 
1633   timings_.NewSplit("RequestHeapTrim");
1634   heap->RequestHeapTrim();
1635 
1636   // Update the cumulative statistics
1637   total_time_ns_ += GetDurationNs();
1638   total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
1639                                            static_cast<uint64_t>(0), std::plus<uint64_t>());
1640   total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
1641   total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
1642 
1643   // Ensure that the mark stack is empty.
1644   CHECK(mark_stack_->IsEmpty());
1645 
1646   if (kCountScannedTypes) {
1647     VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
1648              << " other=" << other_count_;
1649   }
1650 
1651   if (kCountTasks) {
1652     VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
1653   }
1654 
1655   if (kMeasureOverhead) {
1656     VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
1657   }
1658 
1659   if (kProfileLargeObjects) {
1660     VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
1661   }
1662 
1663   if (kCountClassesMarked) {
1664     VLOG(gc) << "Classes marked " << classes_marked_;
1665   }
1666 
1667   if (kCountJavaLangRefs) {
1668     VLOG(gc) << "References scanned " << reference_count_;
1669   }
1670 
1671   // Update the cumulative loggers.
1672   cumulative_timings_.Start();
1673   cumulative_timings_.AddLogger(timings_);
1674   cumulative_timings_.End();
1675 
1676   // Clear all of the spaces' mark bitmaps.
1677   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1678     if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
1679       space->GetMarkBitmap()->Clear();
1680     }
1681   }
1682   mark_stack_->Reset();
1683 
1684   // Reset the marked large objects.
1685   space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
1686   large_objects->GetMarkObjects()->Clear();
1687 }
1688 
1689 }  // namespace collector
1690 }  // namespace gc
1691 }  // namespace art
1692