/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni/jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/reference-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "write_barrier-inl.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;

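// Decide how each continuous space participates in this collection: spaces that are never
// collected or only fully collected become immune, and the to-space has its live bitmap bound as
// its mark bitmap so that objects copied there are implicitly marked.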
void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_) {
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
      mark_stack_(nullptr),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // Semi-space collector is special since it is sometimes called with the mutators suspended
  // during the zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since it will cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ReferenceProcessor* rp = GetHeap()->GetReferenceProcessor();
  rp->Setup(self, this, /*concurrent=*/false, GetCurrentIteration()->GetClearSoftReferences());
  rp->ProcessReferences(self, GetTimings());
}

void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap-corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(ThreadState::kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), ThreadState::kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();

  // Always clear soft references.
  GetCurrentIteration()->SetClearSoftReferences(true);
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), /*use_rem_sets=*/false, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->BroadcastForNewSystemWeaks();
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (!from_space_->IsRosAllocSpace()) {
      // Protect with PROT_NONE.
      VLOG(heap) << "Protecting from_space_ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    } else {
      // If RosAllocSpace, we'll leave it as PROT_READ here so the
      // rosalloc verification can read the metadata magic number and
      // protect it with PROT_NONE later in FinishPhase().
      VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_READ);
    }
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if (space->IsImageSpace() && space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space, app image spaces, main spaces
      // when the bump-pointer-space-only collection is enabled), then we need to scan its live
      // bitmap or dirty cards as roots (including the objects on the live stack which have just
      // been marked in the live bitmap above in MarkAllocStackAsLive()).
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName();
        // App images currently do not have remembered sets.
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
                                          REQUIRES(Locks::mutator_lock_,
                                                   Locks::heap_bitmap_lock_) {
                                        ScanObject(obj);
                                      });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
                                          REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                                                          Locks::mutator_lock_) {
                                        DCHECK(obj != nullptr);
                                        VerifyNoFromSpaceReferences(obj);
                                      });
      }
    }
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an optimization that
  // enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
}

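// Grow the mark stack to new_size, preserving the entries that are already on it.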
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

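// Push a newly marked object onto the mark stack, doubling the stack's capacity first if it is
// already full.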
inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

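// Copy an object from src to dest while trying not to dirty destination pages needlessly. For
// copies larger than a page, pages whose source words are all zero are left untouched (the
// destination is assumed to be zero-filled already), so those pages stay clean. Returns the
// number of bytes whose copying was avoided this way.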
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

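// Copy a from-space object that has not been forwarded yet. Allocation is attempted in the
// to-space first and falls back to the non-moving fallback space if the to-space is full. The
// new copy is recorded in the appropriate live bitmap and its address (the forwarding address)
// is returned.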
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
  // Copy it to the to-space.
  mirror::Object* forward_address = to_space_->AllocThreadUnsafe(
      self_, object_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);

  if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(
        self_, object_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerReadBarrier) {
    obj->AssertReadBarrierState();
    forward_address->AssertReadBarrierState();
  }
  DCHECK(to_space_->HasAddress(forward_address) || fallback_space_->HasAddress(forward_address))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

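// Mark a single object and return its address after the collection (the forwarding address if
// the object was moved, otherwise the object itself).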
mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime* runtime = Runtime::Current();
  runtime->SweepSystemWeaks(this);
  runtime->GetThreadList()->SweepInterpreterCaches(this);
}

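// Only sweep spaces other than the two semi-spaces: the from-space is reclaimed wholesale via
// Clear() and the to-space holds the survivors.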
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

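// Visitor used by ScanObject to mark (and, if needed, forward) every reference held by an
// object, and to hand java.lang.ref.Reference referents to the reference processor.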
class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
  obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    ScanObject(obj);
  }
}

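// Return the post-collection address of obj if it is marked, or null otherwise: a from-space
// object maps to its forwarding address (null if it was never forwarded), immune and to-space
// objects are considered marked, and everything else is checked against the mark bitmap.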
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (immune_spaces_.IsInImmuneRegion(obj) || to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                            // SemiSpace does the GC in a pause. No CAS needed.
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = object->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

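// The source and destination semi-spaces are installed by the caller (typically the heap) before
// the collection starts; FinishPhase() nulls them out again afterwards.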
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (from_space_->IsRosAllocSpace()) {
      VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    }
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art