/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

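// Compile-time tuning flags and thresholds for the semi-space collector. The two byte thresholds
// are only consulted in generational (GSS) mode, where FinishPhase() uses them to decide whether
// the next collection should be a whole-heap collection instead of a bump-pointer-space-only one.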
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_ || collect_from_space_only_) {
        if (collect_from_space_only_) {
          // Bind the bitmaps of the main free list space and the non-moving space when we are
          // doing a bump pointer space only collection.
          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
                space == GetHeap()->GetNonMovingSpace());
        }
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
  if (collect_from_space_only_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
      collect_from_space_only_(generational),
      promo_dest_space_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // Semi-space collector is special since it is sometimes called with the mutators suspended
  // during the zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since it will cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (generational_) {
    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

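// The marking phase runs with the mutator lock exclusively held (mutators suspended): it marks
// the roots, recursively forwards and copies every reachable from-space object, processes
// references and system weaks, records the freed counts, and finally clears and protects the
// from-space before (optionally) swapping the semi-spaces.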
void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap.
      collect_from_space_only_ = false;
    }
    if (!collect_from_space_only_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!collect_from_space_only_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for stop-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space used by GSS.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  if (kProtectFromSpace && !from_space_->IsRosAllocSpace()) {
    // Protect with PROT_NONE.
    VLOG(heap) << "Protecting from_space_ : " << *from_space_;
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    // If RosAllocSpace, we'll leave it as PROT_READ here so the
    // rosalloc verification can read the metadata magic number and
    // protect it with PROT_NONE later in FinishPhase().
    VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

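// Recursively marks (and thereby copies) everything reachable from the already-marked roots and
// from the spaces we do not collect: immune spaces are scanned via their mod-union tables, the
// non-moving/main spaces via remembered sets or their live bitmaps, and the large object space is
// scanned as roots only when it is immune. Finishes by draining the mark stack.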
void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if ((space->IsImageSpace() || collect_from_space_only_) &&
               space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space, app image spaces, main spaces
      // when the bump pointer space only collection is enabled), then we need to scan its live
      // bitmap or dirty cards as roots (including the objects on the live stack which have just
      // been marked in the live bitmap above in MarkAllocStackAsLive().)
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "collect_from_space_only_=" << collect_from_space_only_;
        // App images currently do not have remembered sets.
        DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
                                          REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
                                        ScanObject(obj);
                                      });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
                                          SHARED_REQUIRES(Locks::heap_bitmap_lock_,
                                                          Locks::mutator_lock_) {
                                        DCHECK(obj != nullptr);
                                        VerifyNoFromSpaceReferences(obj);
                                      });
      }
    }
  }

  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (is_large_object_space_immune_ && los != nullptr) {
    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
    DCHECK(collect_from_space_only_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    los->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move though they
    // don't contain any other references.
    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
                                        reinterpret_cast<uintptr_t>(los->End()),
                                        [this](mirror::Object* obj)
                                            REQUIRES(Locks::mutator_lock_,
                                                     Locks::heap_bitmap_lock_) {
                                          ScanObject(obj);
                                        });
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

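// Grows the mark stack: saves the current contents aside, resizes the underlying stack, then
// pushes the saved references back.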
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

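// Copies 'size' bytes from 'src' to 'dest' while trying not to dirty zero-filled destination
// pages: for copies larger than a page, whole pages whose source words are all zero are skipped
// (the destination is assumed to be pre-zeroed), so those pages stay clean. Returns the number of
// bytes whose copying was avoided this way.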
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

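// Computes a forwarding address for a from-space object that has not been forwarded yet and
// copies the object there. In generational mode, objects allocated before the last GC are
// pseudo-promoted into the main free list space; everything else goes to the to-space, with the
// non-moving space serving as a last-resort fallback.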
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, dummy;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                           nullptr, &dummy);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                     &dummy);
      // No logic for marking the bitmap, so it must be null.
      DCHECK(to_space_live_bitmap_ == nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (collect_from_space_only_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                   &dummy);
    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
      to_space_live_bitmap_->Set(forward_address);
    }
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                         nullptr, &dummy);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         fallback_space_->HasAddress(forward_address) ||
         (generational_ && promo_dest_space_->HasAddress(forward_address)))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}


// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(this);
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}


class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  obj->VisitReferences(visitor, visitor);
}


// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (collect_from_space_only_) {
    // If a bump pointer space only collection (and the promotion is
    // enabled,) we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    live_bitmap = promo_dest_space_->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
}

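// Returns the post-collection address of 'obj' if it is marked, or null otherwise: for from-space
// objects this is the forwarding address (null if not yet forwarded); objects in immune spaces,
// in the to-space, or anywhere outside the from-space during a bump-pointer-space-only collection
// are considered marked; everything else is looked up in the mark bitmap.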
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (collect_from_space_only_ ||
             immune_spaces_.IsInImmuneRegion(obj) ||
             to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
  mirror::Object* obj = object->AsMirrorPtr();
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}


void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kProtectFromSpace && from_space_->IsRosAllocSpace()) {
    VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
    from_space_->GetMemMap()->Protect(PROT_NONE);
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // collect_from_space_only_.
    if (collect_from_space_only_) {
      // Disable collect_from_space_only_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        collect_from_space_only_ = false;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          los != nullptr ? los->GetBytesAllocated() : 0U;
      collect_from_space_only_ = true;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art