1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "concurrent_copying.h"
18
19 #include "art_field-inl.h"
20 #include "barrier.h"
21 #include "base/enums.h"
22 #include "base/file_utils.h"
23 #include "base/histogram-inl.h"
24 #include "base/quasi_atomic.h"
25 #include "base/stl_util.h"
26 #include "base/systrace.h"
27 #include "class_root.h"
28 #include "debugger.h"
29 #include "gc/accounting/atomic_stack.h"
30 #include "gc/accounting/heap_bitmap-inl.h"
31 #include "gc/accounting/mod_union_table-inl.h"
32 #include "gc/accounting/read_barrier_table.h"
33 #include "gc/accounting/space_bitmap-inl.h"
34 #include "gc/gc_pause_listener.h"
35 #include "gc/reference_processor.h"
36 #include "gc/space/image_space.h"
37 #include "gc/space/space-inl.h"
38 #include "gc/verification.h"
39 #include "image-inl.h"
40 #include "intern_table.h"
41 #include "mirror/class-inl.h"
42 #include "mirror/object-inl.h"
43 #include "mirror/object-refvisitor-inl.h"
44 #include "mirror/object_reference.h"
45 #include "scoped_thread_state_change-inl.h"
46 #include "thread-inl.h"
47 #include "thread_list.h"
48 #include "well_known_classes.h"
49
50 namespace art {
51 namespace gc {
52 namespace collector {
53
54 static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
55 // If kFilterModUnionCards is true, we attempt to filter out cards that don't need to be dirty in the
56 // mod union table. Disabled since it does not seem to help the pause much.
57 static constexpr bool kFilterModUnionCards = kIsDebugBuild;
58 // If kDisallowReadBarrierDuringScan is true, the GC aborts if any read barriers occur while
59 // ConcurrentCopying::Scan is running on the GC thread. May be used to diagnose possibly unnecessary
60 // read barriers. Only enabled for kIsDebugBuild to avoid the performance hit.
61 static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
62 // Slow path mark stack size; increase this if the stack is getting full and it is causing
63 // performance problems.
64 static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
65 // Size (in the number of objects) of the sweep array free buffer.
66 static constexpr size_t kSweepArrayChunkFreeSize = 1024;
67 // Verify that there are no missing card marks.
68 static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;
69
70 ConcurrentCopying::ConcurrentCopying(Heap* heap,
71 bool young_gen,
72 bool use_generational_cc,
73 const std::string& name_prefix,
74 bool measure_read_barrier_slow_path)
75 : GarbageCollector(heap,
76 name_prefix + (name_prefix.empty() ? "" : " ") +
77 "concurrent copying"),
78 region_space_(nullptr),
79 gc_barrier_(new Barrier(0)),
80 gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
81 kDefaultGcMarkStackSize,
82 kDefaultGcMarkStackSize)),
83 use_generational_cc_(use_generational_cc),
84 young_gen_(young_gen),
85 rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
86 kReadBarrierMarkStackSize,
87 kReadBarrierMarkStackSize)),
88 rb_mark_bit_stack_full_(false),
89 mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
90 thread_running_gc_(nullptr),
91 is_marking_(false),
92 is_using_read_barrier_entrypoints_(false),
93 is_active_(false),
94 is_asserting_to_space_invariant_(false),
95 region_space_bitmap_(nullptr),
96 heap_mark_bitmap_(nullptr),
97 live_stack_freeze_size_(0),
98 from_space_num_objects_at_first_pause_(0),
99 from_space_num_bytes_at_first_pause_(0),
100 mark_stack_mode_(kMarkStackModeOff),
101 weak_ref_access_enabled_(true),
102 copied_live_bytes_ratio_sum_(0.f),
103 gc_count_(0),
104 region_space_inter_region_bitmap_(nullptr),
105 non_moving_space_inter_region_bitmap_(nullptr),
106 reclaimed_bytes_ratio_sum_(0.f),
107 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
108 measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
109 mark_from_read_barrier_measurements_(false),
110 rb_slow_path_ns_(0),
111 rb_slow_path_count_(0),
112 rb_slow_path_count_gc_(0),
113 rb_slow_path_histogram_lock_("Read barrier histogram lock"),
114 rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
115 rb_slow_path_count_total_(0),
116 rb_slow_path_count_gc_total_(0),
117 rb_table_(heap_->GetReadBarrierTable()),
118 force_evacuate_all_(false),
119 gc_grays_immune_objects_(false),
120 immune_gray_stack_lock_("concurrent copying immune gray stack lock",
121 kMarkSweepMarkStackLock),
122 num_bytes_allocated_before_gc_(0) {
123 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
124 "The region space size and the read barrier table region size must match");
125 CHECK(use_generational_cc_ || !young_gen_);
126 Thread* self = Thread::Current();
127 {
128 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
129     // Cache this so that we won't have to lock heap_bitmap_lock_ in
130     // Mark(), which could cause a nested lock on heap_bitmap_lock_
131     // when a read barrier occurs during GC, or a lock order violation
132     // (class_linker_lock_ and heap_bitmap_lock_).
133 heap_mark_bitmap_ = heap->GetMarkBitmap();
134 }
135 {
136 MutexLock mu(self, mark_stack_lock_);
137 for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
138 accounting::AtomicStack<mirror::Object>* mark_stack =
139 accounting::AtomicStack<mirror::Object>::Create(
140 "thread local mark stack", kMarkStackSize, kMarkStackSize);
141 pooled_mark_stacks_.push_back(mark_stack);
142 }
143 }
144 if (use_generational_cc_) {
145 // Allocate sweep array free buffer.
146 std::string error_msg;
147 sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
148 "concurrent copying sweep array free buffer",
149 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
150 PROT_READ | PROT_WRITE,
151 /*low_4gb=*/ false,
152 &error_msg);
153 CHECK(sweep_array_free_buffer_mem_map_.IsValid())
154 << "Couldn't allocate sweep array free buffer: " << error_msg;
155 }
156 }
157
158 void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
159 bool do_atomic_update) {
160 Thread* const self = Thread::Current();
161 if (UNLIKELY(do_atomic_update)) {
162 // Used to mark the referent in DelayReferenceReferent in transaction mode.
163 mirror::Object* from_ref = field->AsMirrorPtr();
164 if (from_ref == nullptr) {
165 return;
166 }
167 mirror::Object* to_ref = Mark(self, from_ref);
168 if (from_ref != to_ref) {
169 do {
170 if (field->AsMirrorPtr() != from_ref) {
171 // Concurrently overwritten by a mutator.
172 break;
173 }
174 } while (!field->CasWeakRelaxed(from_ref, to_ref));
175 }
176 } else {
177     // Used for preserving soft references; it should be OK not to have a CAS here since there
178     // should be no other threads which can trigger read barriers on the same referent during
179     // reference processing.
180 field->Assign(Mark(self, field->AsMirrorPtr()));
181 }
182 }
183
184 ConcurrentCopying::~ConcurrentCopying() {
185 STLDeleteElements(&pooled_mark_stacks_);
186 }
187
188 void ConcurrentCopying::RunPhases() {
189 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
190 CHECK(!is_active_);
191 is_active_ = true;
192 Thread* self = Thread::Current();
193 thread_running_gc_ = self;
194 Locks::mutator_lock_->AssertNotHeld(self);
195 {
196 ReaderMutexLock mu(self, *Locks::mutator_lock_);
197 InitializePhase();
198 // In case of forced evacuation, all regions are evacuated and hence no
199 // need to compute live_bytes.
200 if (use_generational_cc_ && !young_gen_ && !force_evacuate_all_) {
201 MarkingPhase();
202 }
203 }
204 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
205 // Switch to read barrier mark entrypoints before we gray the objects. This is required in case
206 // a mutator sees a gray bit and dispatches on the entrypoint. (b/37876887).
207 ActivateReadBarrierEntrypoints();
208 // Gray dirty immune objects concurrently to reduce GC pause times. We re-process gray cards in
209 // the pause.
210 ReaderMutexLock mu(self, *Locks::mutator_lock_);
211 GrayAllDirtyImmuneObjects();
212 }
213 FlipThreadRoots();
214 {
215 ReaderMutexLock mu(self, *Locks::mutator_lock_);
216 CopyingPhase();
217 }
218 // Verify no from space refs. This causes a pause.
219 if (kEnableNoFromSpaceRefsVerification) {
220 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
221 ScopedPause pause(this, false);
222 CheckEmptyMarkStack();
223 if (kVerboseMode) {
224 LOG(INFO) << "Verifying no from-space refs";
225 }
226 VerifyNoFromSpaceReferences();
227 if (kVerboseMode) {
228 LOG(INFO) << "Done verifying no from-space refs";
229 }
230 CheckEmptyMarkStack();
231 }
232 {
233 ReaderMutexLock mu(self, *Locks::mutator_lock_);
234 ReclaimPhase();
235 }
236 FinishPhase();
237 CHECK(is_active_);
238 is_active_ = false;
239 thread_running_gc_ = nullptr;
240 }
241
242 class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closure {
243 public:
244   explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
245 : concurrent_copying_(concurrent_copying) {}
246
247   void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
248 // Note: self is not necessarily equal to thread since thread may be suspended.
249 Thread* self = Thread::Current();
250 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
251 << thread->GetState() << " thread " << thread << " self " << self;
252 // Switch to the read barrier entrypoints.
253 thread->SetReadBarrierEntrypoints();
254 // If thread is a running mutator, then act on behalf of the garbage collector.
255 // See the code in ThreadList::RunCheckpoint.
256 concurrent_copying_->GetBarrier().Pass(self);
257 }
258
259 private:
260 ConcurrentCopying* const concurrent_copying_;
261 };
262
263 class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure {
264 public:
265   explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
266 : concurrent_copying_(concurrent_copying) {}
267
268   void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
269 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
270 // to avoid a race with ThreadList::Register().
271 CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
272 concurrent_copying_->is_using_read_barrier_entrypoints_ = true;
273 }
274
275 private:
276 ConcurrentCopying* const concurrent_copying_;
277 };
278
279 void ConcurrentCopying::ActivateReadBarrierEntrypoints() {
280 Thread* const self = Thread::Current();
281 ActivateReadBarrierEntrypointsCheckpoint checkpoint(this);
282 ThreadList* thread_list = Runtime::Current()->GetThreadList();
283 gc_barrier_->Init(self, 0);
284 ActivateReadBarrierEntrypointsCallback callback(this);
285 const size_t barrier_count = thread_list->RunCheckpoint(&checkpoint, &callback);
286   // If there are no threads to wait for, which implies that all the checkpoint functions are
287   // finished, then there is no need to release the mutator lock.
288 if (barrier_count == 0) {
289 return;
290 }
291 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
292 gc_barrier_->Increment(self, barrier_count);
293 }
294
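// Allocates the two inter-region reference bitmaps used by generational CC: one covering the
// region space and one covering the non-moving space. They record which objects hold
// inter-region references (set during the marking phase in AddLiveBytesAndScanRef).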
295 void ConcurrentCopying::CreateInterRegionRefBitmaps() {
296 DCHECK(use_generational_cc_);
297 DCHECK(region_space_inter_region_bitmap_ == nullptr);
298 DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
299 DCHECK(region_space_ != nullptr);
300 DCHECK(heap_->non_moving_space_ != nullptr);
301 // Region-space
302 region_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
303 "region-space inter region ref bitmap",
304 reinterpret_cast<uint8_t*>(region_space_->Begin()),
305 region_space_->Limit() - region_space_->Begin()));
306 CHECK(region_space_inter_region_bitmap_ != nullptr)
307 << "Couldn't allocate region-space inter region ref bitmap";
308
309 // non-moving-space
310 non_moving_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
311 "non-moving-space inter region ref bitmap",
312 reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()),
313 heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin()));
314 CHECK(non_moving_space_inter_region_bitmap_ != nullptr)
315 << "Couldn't allocate non-moving-space inter region ref bitmap";
316 }
317
318 void ConcurrentCopying::BindBitmaps() {
319 Thread* self = Thread::Current();
320 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
321 // Mark all of the spaces we never collect as immune.
322 for (const auto& space : heap_->GetContinuousSpaces()) {
323 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
324 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
325 CHECK(space->IsZygoteSpace() || space->IsImageSpace());
326 immune_spaces_.AddSpace(space);
327 } else {
328 CHECK(!space->IsZygoteSpace());
329 CHECK(!space->IsImageSpace());
330 CHECK(space == region_space_ || space == heap_->non_moving_space_);
331 if (use_generational_cc_) {
332 if (space == region_space_) {
333 region_space_bitmap_ = region_space_->GetMarkBitmap();
334 } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) {
335 DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
336 space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
337 }
338 if (young_gen_) {
339 // Age all of the cards for the region space so that we know which evac regions to scan.
340 heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
341 space->End(),
342 AgeCardVisitor(),
343 VoidFunctor());
344 } else {
345 // In a full-heap GC cycle, the card-table corresponding to region-space and
346 // non-moving space can be cleared, because this cycle only needs to
347 // capture writes during the marking phase of this cycle to catch
348 // objects that skipped marking due to heap mutation. Furthermore,
349 // if the next GC is a young-gen cycle, then it only needs writes to
350 // be captured after the thread-flip of this GC cycle, as that is when
351 // the young-gen for the next GC cycle starts getting populated.
352 heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
353 }
354 } else {
355 if (space == region_space_) {
356         // It is OK to clear the bitmap with mutators running since the only place it is read is
357         // VisitObjects, which is mutually exclusive with CC.
358 region_space_bitmap_ = region_space_->GetMarkBitmap();
359 region_space_bitmap_->Clear();
360 }
361 }
362 }
363 }
364 if (use_generational_cc_ && young_gen_) {
365 for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
366 CHECK(space->IsLargeObjectSpace());
367 space->AsLargeObjectSpace()->CopyLiveToMarked();
368 }
369 }
370 }
371
372 void ConcurrentCopying::InitializePhase() {
373 TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
374 num_bytes_allocated_before_gc_ = static_cast<int64_t>(heap_->GetBytesAllocated());
375 if (kVerboseMode) {
376 LOG(INFO) << "GC InitializePhase";
377 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
378 << reinterpret_cast<void*>(region_space_->Limit());
379 }
380 CheckEmptyMarkStack();
381 rb_mark_bit_stack_full_ = false;
382 mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
383 if (measure_read_barrier_slow_path_) {
384 rb_slow_path_ns_.store(0, std::memory_order_relaxed);
385 rb_slow_path_count_.store(0, std::memory_order_relaxed);
386 rb_slow_path_count_gc_.store(0, std::memory_order_relaxed);
387 }
388
389 immune_spaces_.Reset();
390 bytes_moved_.store(0, std::memory_order_relaxed);
391 objects_moved_.store(0, std::memory_order_relaxed);
392 bytes_moved_gc_thread_ = 0;
393 objects_moved_gc_thread_ = 0;
394 GcCause gc_cause = GetCurrentIteration()->GetGcCause();
395
396 force_evacuate_all_ = false;
397 if (!use_generational_cc_ || !young_gen_) {
398 if (gc_cause == kGcCauseExplicit ||
399 gc_cause == kGcCauseCollectorTransition ||
400 GetCurrentIteration()->GetClearSoftReferences()) {
401 force_evacuate_all_ = true;
402 }
403 }
404 if (kUseBakerReadBarrier) {
405 updated_all_immune_objects_.store(false, std::memory_order_relaxed);
406 // GC may gray immune objects in the thread flip.
407 gc_grays_immune_objects_ = true;
408 if (kIsDebugBuild) {
409 MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
410 DCHECK(immune_gray_stack_.empty());
411 }
412 }
413 if (use_generational_cc_) {
414 done_scanning_.store(false, std::memory_order_release);
415 }
416 BindBitmaps();
417 if (kVerboseMode) {
418 LOG(INFO) << "young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha;
419 LOG(INFO) << "force_evacuate_all=" << std::boolalpha << force_evacuate_all_ << std::noboolalpha;
420 LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
421 << "-" << immune_spaces_.GetLargestImmuneRegion().End();
422 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
423 LOG(INFO) << "Immune space: " << *space;
424 }
425 LOG(INFO) << "GC end of InitializePhase";
426 }
427 if (use_generational_cc_ && !young_gen_) {
428 region_space_bitmap_->Clear();
429 }
430 mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
431 // Mark all of the zygote large objects without graying them.
432 MarkZygoteLargeObjects();
433 }
434
435 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
436 class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
437 public:
438   ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
439 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
440 }
441
442   void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
443 // Note: self is not necessarily equal to thread since thread may be suspended.
444 Thread* self = Thread::Current();
445 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
446 << thread->GetState() << " thread " << thread << " self " << self;
447 thread->SetIsGcMarkingAndUpdateEntrypoints(true);
448 if (use_tlab_ && thread->HasTlab()) {
449 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
450 // This must come before the revoke.
451 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
452 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
453 reinterpret_cast<Atomic<size_t>*>(
454 &concurrent_copying_->from_space_num_objects_at_first_pause_)->
455 fetch_add(thread_local_objects, std::memory_order_relaxed);
456 } else {
457 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
458 }
459 }
460 if (kUseThreadLocalAllocationStack) {
461 thread->RevokeThreadLocalAllocationStack();
462 }
463 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
464 // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
465 // only.
466 thread->VisitRoots(this, kVisitRootFlagAllRoots);
467 concurrent_copying_->GetBarrier().Pass(self);
468 }
469
470   void VisitRoots(mirror::Object*** roots,
471 size_t count,
472 const RootInfo& info ATTRIBUTE_UNUSED) override
473 REQUIRES_SHARED(Locks::mutator_lock_) {
474 Thread* self = Thread::Current();
475 for (size_t i = 0; i < count; ++i) {
476 mirror::Object** root = roots[i];
477 mirror::Object* ref = *root;
478 if (ref != nullptr) {
479 mirror::Object* to_ref = concurrent_copying_->Mark(self, ref);
480 if (to_ref != ref) {
481 *root = to_ref;
482 }
483 }
484 }
485 }
486
487   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
488 size_t count,
489 const RootInfo& info ATTRIBUTE_UNUSED) override
490 REQUIRES_SHARED(Locks::mutator_lock_) {
491 Thread* self = Thread::Current();
492 for (size_t i = 0; i < count; ++i) {
493 mirror::CompressedReference<mirror::Object>* const root = roots[i];
494 if (!root->IsNull()) {
495 mirror::Object* ref = root->AsMirrorPtr();
496 mirror::Object* to_ref = concurrent_copying_->Mark(self, ref);
497 if (to_ref != ref) {
498 root->Assign(to_ref);
499 }
500 }
501 }
502 }
503
504 private:
505 ConcurrentCopying* const concurrent_copying_;
506 const bool use_tlab_;
507 };
508
509 // Called back from Runtime::FlipThreadRoots() during a pause.
510 class ConcurrentCopying::FlipCallback : public Closure {
511 public:
512   explicit FlipCallback(ConcurrentCopying* concurrent_copying)
513 : concurrent_copying_(concurrent_copying) {
514 }
515
516   void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
517 ConcurrentCopying* cc = concurrent_copying_;
518 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
519 // Note: self is not necessarily equal to thread since thread may be suspended.
520 Thread* self = Thread::Current();
521 if (kVerifyNoMissingCardMarks && cc->young_gen_) {
522 cc->VerifyNoMissingCardMarks();
523 }
524 CHECK_EQ(thread, self);
525 Locks::mutator_lock_->AssertExclusiveHeld(self);
526 space::RegionSpace::EvacMode evac_mode = space::RegionSpace::kEvacModeLivePercentNewlyAllocated;
527 if (cc->young_gen_) {
528 CHECK(!cc->force_evacuate_all_);
529 evac_mode = space::RegionSpace::kEvacModeNewlyAllocated;
530 } else if (cc->force_evacuate_all_) {
531 evac_mode = space::RegionSpace::kEvacModeForceAll;
532 }
533 {
534 TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
535 // Only change live bytes for 1-phase full heap CC.
536 cc->region_space_->SetFromSpace(
537 cc->rb_table_,
538 evac_mode,
539 /*clear_live_bytes=*/ !cc->use_generational_cc_);
540 }
541 cc->SwapStacks();
542 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
543 cc->RecordLiveStackFreezeSize(self);
544 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
545 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
546 }
547 cc->is_marking_ = true;
548 if (kIsDebugBuild && !cc->use_generational_cc_) {
549 cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
550 }
551 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
552 CHECK(Runtime::Current()->IsAotCompiler());
553 TimingLogger::ScopedTiming split3("(Paused)VisitTransactionRoots", cc->GetTimings());
554 Runtime::Current()->VisitTransactionRoots(cc);
555 }
556 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
557 cc->GrayAllNewlyDirtyImmuneObjects();
558 if (kIsDebugBuild) {
559 // Check that all non-gray immune objects only reference immune objects.
560 cc->VerifyGrayImmuneObjects();
561 }
562 }
563     // May be null during runtime creation; in this case leave java_lang_Object_ null.
564     // This is safe since single-threaded behavior should mean FillDummyObject does not
565     // happen when java_lang_Object_ is null.
566 if (WellKnownClasses::java_lang_Object != nullptr) {
567 cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(thread,
568 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
569 } else {
570 cc->java_lang_Object_ = nullptr;
571 }
572 }
573
574 private:
575 ConcurrentCopying* const concurrent_copying_;
576 };
577
578 class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
579 public:
580   explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
581 : collector_(collector) {}
582
583   void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
584 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
585 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
586 CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
587 obj, offset);
588 }
589
590   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
591 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
592 CHECK(klass->IsTypeOfReferenceClass());
593 CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
594 ref,
595 mirror::Reference::ReferentOffset());
596 }
597
598   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
599 ALWAYS_INLINE
600 REQUIRES_SHARED(Locks::mutator_lock_) {
601 if (!root->IsNull()) {
602 VisitRoot(root);
603 }
604 }
605
606   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
607 ALWAYS_INLINE
608 REQUIRES_SHARED(Locks::mutator_lock_) {
609 CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
610 }
611
612 private:
613 ConcurrentCopying* const collector_;
614
615   void CheckReference(ObjPtr<mirror::Object> ref,
616 ObjPtr<mirror::Object> holder,
617 MemberOffset offset) const
618 REQUIRES_SHARED(Locks::mutator_lock_) {
619 if (ref != nullptr) {
620 if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
621 // Not immune, must be a zygote large object.
622 space::LargeObjectSpace* large_object_space =
623 Runtime::Current()->GetHeap()->GetLargeObjectsSpace();
624 CHECK(large_object_space->Contains(ref.Ptr()) &&
625 large_object_space->IsZygoteLargeObject(Thread::Current(), ref.Ptr()))
626           << "Non gray object references non immune, non zygote large object " << ref << " "
627 << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
628 << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
629 } else {
630 // Make sure the large object class is immune since we will never scan the large object.
631 CHECK(collector_->immune_spaces_.ContainsObject(
632 ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
633 }
634 }
635 }
636 };
637
638 void ConcurrentCopying::VerifyGrayImmuneObjects() {
639 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
640 for (auto& space : immune_spaces_.GetSpaces()) {
641 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
642 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
643 VerifyGrayImmuneObjectsVisitor visitor(this);
644 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
645 reinterpret_cast<uintptr_t>(space->Limit()),
646 [&visitor](mirror::Object* obj)
647 REQUIRES_SHARED(Locks::mutator_lock_) {
648 // If an object is not gray, it should only have references to things in the immune spaces.
649 if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
650 obj->VisitReferences</*kVisitNativeRoots=*/true,
651 kDefaultVerifyFlags,
652 kWithoutReadBarrier>(visitor, visitor);
653 }
654 });
655 }
656 }
657
658 class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
659 public:
660   VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
661 : cc_(cc),
662 holder_(holder) {}
663
664   void operator()(ObjPtr<mirror::Object> obj,
665 MemberOffset offset,
666 bool is_static ATTRIBUTE_UNUSED) const
667 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
668 if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
669 CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
670 offset), offset.Uint32Value());
671 }
672 }
673   void operator()(ObjPtr<mirror::Class> klass,
674 ObjPtr<mirror::Reference> ref) const
675 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
676 CHECK(klass->IsTypeOfReferenceClass());
677 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
678 }
679
680   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
681 REQUIRES_SHARED(Locks::mutator_lock_) {
682 if (!root->IsNull()) {
683 VisitRoot(root);
684 }
685 }
686
687   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
688 REQUIRES_SHARED(Locks::mutator_lock_) {
689 CheckReference(root->AsMirrorPtr());
690 }
691
692   void CheckReference(mirror::Object* ref, int32_t offset = -1) const
693 REQUIRES_SHARED(Locks::mutator_lock_) {
694 if (ref != nullptr && cc_->region_space_->IsInNewlyAllocatedRegion(ref)) {
695 LOG(FATAL_WITHOUT_ABORT)
696 << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
697 << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
698 LOG(FATAL_WITHOUT_ABORT) << "time=" << cc_->region_space_->Time();
699 constexpr const char* kIndent = " ";
700 LOG(FATAL_WITHOUT_ABORT) << cc_->DumpReferenceInfo(holder_.Ptr(), "holder_", kIndent);
701 LOG(FATAL_WITHOUT_ABORT) << cc_->DumpReferenceInfo(ref, "ref", kIndent);
702 LOG(FATAL) << "Unexpected reference to newly allocated region.";
703 }
704 }
705
706 private:
707 ConcurrentCopying* const cc_;
708 const ObjPtr<mirror::Object> holder_;
709 };
710
711 void ConcurrentCopying::VerifyNoMissingCardMarks() {
712 auto visitor = [&](mirror::Object* obj)
713 REQUIRES(Locks::mutator_lock_)
714 REQUIRES(!mark_stack_lock_) {
715 // Objects on clean cards should never have references to newly allocated regions. Note
716 // that aged cards are also not clean.
717 if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
718 VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
719 obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
720 internal_visitor, internal_visitor);
721 }
722 };
723 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
724 region_space_->Walk(visitor);
725 {
726 ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
727 heap_->GetLiveBitmap()->Visit(visitor);
728 }
729 }
730
731 // Switch the threads' roots from from-space refs to to-space refs. Forward/mark the thread roots.
732 void ConcurrentCopying::FlipThreadRoots() {
733 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
734 if (kVerboseMode || heap_->dump_region_info_before_gc_) {
735 LOG(INFO) << "time=" << region_space_->Time();
736 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
737 }
738 Thread* self = Thread::Current();
739 Locks::mutator_lock_->AssertNotHeld(self);
740 gc_barrier_->Init(self, 0);
741 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
742 FlipCallback flip_callback(this);
743
744 size_t barrier_count = Runtime::Current()->GetThreadList()->FlipThreadRoots(
745 &thread_flip_visitor, &flip_callback, this, GetHeap()->GetGcPauseListener());
746
747 {
748 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
749 gc_barrier_->Increment(self, barrier_count);
750 }
751 is_asserting_to_space_invariant_ = true;
752 QuasiAtomic::ThreadFenceForConstructor();
753 if (kVerboseMode) {
754 LOG(INFO) << "time=" << region_space_->Time();
755 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
756 LOG(INFO) << "GC end of FlipThreadRoots";
757 }
758 }
759
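// Sets the Baker read-barrier state of a non-gray immune-space object to gray so that its
// references get re-processed. kConcurrent selects an atomic CAS (mutators running) versus a
// plain store (within the pause).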
760 template <bool kConcurrent>
761 class ConcurrentCopying::GrayImmuneObjectVisitor {
762 public:
763   explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}
764
765   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
766 if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::NonGrayState()) {
767 if (kConcurrent) {
768 Locks::mutator_lock_->AssertSharedHeld(self_);
769 obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
770 // Mod union table VisitObjects may visit the same object multiple times so we can't check
771 // the result of the atomic set.
772 } else {
773 Locks::mutator_lock_->AssertExclusiveHeld(self_);
774 obj->SetReadBarrierState(ReadBarrier::GrayState());
775 }
776 }
777 }
778
779   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
780 reinterpret_cast<GrayImmuneObjectVisitor<kConcurrent>*>(arg)->operator()(obj);
781 }
782
783 private:
784 Thread* const self_;
785 };
786
787 void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
788 TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
789 accounting::CardTable* const card_table = heap_->GetCardTable();
790 Thread* const self = Thread::Current();
791 using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
792 VisitorType visitor(self);
793 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
794 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
795 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
796 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
797     // Mark all the objects on dirty cards since these may point to objects in other spaces.
798 // Once these are marked, the GC will eventually clear them later.
799 // Table is non null for boot image and zygote spaces. It is only null for application image
800 // spaces.
801 if (table != nullptr) {
802 table->ProcessCards();
803 table->VisitObjects(&VisitorType::Callback, &visitor);
804 // Don't clear cards here since we need to rescan in the pause. If we cleared the cards here,
805 // there would be races with the mutator marking new cards.
806 } else {
807 // Keep cards aged if we don't have a mod-union table since we may need to scan them in future
808 // GCs. This case is for app images.
809 card_table->ModifyCardsAtomic(
810 space->Begin(),
811 space->End(),
812 [](uint8_t card) {
813 return (card != gc::accounting::CardTable::kCardClean)
814 ? gc::accounting::CardTable::kCardAged
815 : card;
816 },
817 /* card modified visitor */ VoidFunctor());
818 card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
819 space->Begin(),
820 space->End(),
821 visitor,
822 gc::accounting::CardTable::kCardAged);
823 }
824 }
825 }
826
827 void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
828 TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
829 accounting::CardTable* const card_table = heap_->GetCardTable();
830 using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
831 Thread* const self = Thread::Current();
832 VisitorType visitor(self);
833 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
834 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
835 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
836 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
837
838 // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
839 // also handles the mod-union table cards.
840 card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
841 space->Begin(),
842 space->End(),
843 visitor,
844 gc::accounting::CardTable::kCardDirty);
845 if (table != nullptr) {
846 // Add the cards to the mod-union table so that we can clear cards to save RAM.
847 table->ProcessCards();
848 TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
849 card_table->ClearCardRange(space->Begin(),
850 AlignDown(space->End(), accounting::CardTable::kCardSize));
851 }
852 }
853 // Since all of the objects that may point to other spaces are gray, we can avoid all the read
854 // barriers in the immune spaces.
855 updated_all_immune_objects_.store(true, std::memory_order_relaxed);
856 }
857
858 void ConcurrentCopying::SwapStacks() {
859 heap_->SwapStacks();
860 }
861
862 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
863 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
864 live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
865 }
866
867 // Used to visit objects in the immune spaces.
868 inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
869 DCHECK(obj != nullptr);
870 DCHECK(immune_spaces_.ContainsObject(obj));
871 // Update the fields without graying it or pushing it onto the mark stack.
872 if (use_generational_cc_ && young_gen_) {
873     // Young GC does not care about references to unevac space. It is safe to not gray these as
874     // long as scanning immune objects happens after scanning the dirty cards.
875 Scan<true>(obj);
876 } else {
877 Scan<false>(obj);
878 }
879 }
880
881 class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
882 public:
883   explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
884 : collector_(cc) {}
885
886   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
887 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
888 // Only need to scan gray objects.
889 if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
890 collector_->ScanImmuneObject(obj);
891 // Done scanning the object, go back to black (non-gray).
892 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
893 ReadBarrier::NonGrayState());
894 CHECK(success)
895 << Runtime::Current()->GetHeap()->GetVerification()->DumpObjectInfo(obj, "failed CAS");
896 }
897 } else {
898 collector_->ScanImmuneObject(obj);
899 }
900 }
901
902   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
903 reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
904 }
905
906 private:
907 ConcurrentCopying* const collector_;
908 };
909
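// Root visitor for the marking phase of two-phase (generational) CC: sets the mark bit for each
// root's referent and pushes newly marked objects onto the mark stack, without forwarding any
// references.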
910 template <bool kAtomicTestAndSet>
911 class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
912 public:
913   explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
914 : collector_(cc), self_(self) {}
915
916   void VisitRoots(mirror::Object*** roots,
917 size_t count,
918 const RootInfo& info ATTRIBUTE_UNUSED) override
919 REQUIRES_SHARED(Locks::mutator_lock_) {
920 for (size_t i = 0; i < count; ++i) {
921 mirror::Object** root = roots[i];
922 mirror::Object* ref = *root;
923 if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
924 collector_->PushOntoMarkStack(self_, ref);
925 }
926 }
927 }
928
929   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
930 size_t count,
931 const RootInfo& info ATTRIBUTE_UNUSED) override
932 REQUIRES_SHARED(Locks::mutator_lock_) {
933 for (size_t i = 0; i < count; ++i) {
934 mirror::CompressedReference<mirror::Object>* const root = roots[i];
935 if (!root->IsNull()) {
936 mirror::Object* ref = root->AsMirrorPtr();
937 if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
938 collector_->PushOntoMarkStack(self_, ref);
939 }
940 }
941 }
942 }
943
944 private:
945 ConcurrentCopying* const collector_;
946 Thread* const self_;
947 };
948
949 class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
950 public:
951   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
952 bool disable_weak_ref_access)
953 : concurrent_copying_(concurrent_copying),
954 disable_weak_ref_access_(disable_weak_ref_access) {
955 }
956
957   void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
958 // Note: self is not necessarily equal to thread since thread may be suspended.
959 Thread* const self = Thread::Current();
960 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
961 << thread->GetState() << " thread " << thread << " self " << self;
962 // Revoke thread local mark stacks.
963 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
964 if (tl_mark_stack != nullptr) {
965 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
966 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
967 thread->SetThreadLocalMarkStack(nullptr);
968 }
969 // Disable weak ref access.
970 if (disable_weak_ref_access_) {
971 thread->SetWeakRefAccessEnabled(false);
972 }
973 // If thread is a running mutator, then act on behalf of the garbage collector.
974 // See the code in ThreadList::RunCheckpoint.
975 concurrent_copying_->GetBarrier().Pass(self);
976 }
977
978 protected:
979 ConcurrentCopying* const concurrent_copying_;
980
981 private:
982 const bool disable_weak_ref_access_;
983 };
984
985 class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
986 public RevokeThreadLocalMarkStackCheckpoint {
987 public:
988   explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
989 RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {}
990
991   void Run(Thread* thread) override
992 REQUIRES_SHARED(Locks::mutator_lock_) {
993 Thread* const self = Thread::Current();
994 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
995 // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
996 // only.
997 CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
998 thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
999 // Barrier handling is done in the base class' Run() below.
1000 RevokeThreadLocalMarkStackCheckpoint::Run(thread);
1001 }
1002 };
1003
1004 void ConcurrentCopying::CaptureThreadRootsForMarking() {
1005 TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
1006 if (kVerboseMode) {
1007 LOG(INFO) << "time=" << region_space_->Time();
1008 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
1009 }
1010 Thread* const self = Thread::Current();
1011 CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
1012 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1013 gc_barrier_->Init(self, 0);
1014 size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr);
1015   // If there are no threads to wait for, which implies that all the checkpoint functions are
1016   // finished, then no need to release the mutator lock.
1017 if (barrier_count == 0) {
1018 return;
1019 }
1020 Locks::mutator_lock_->SharedUnlock(self);
1021 {
1022 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1023 gc_barrier_->Increment(self, barrier_count);
1024 }
1025 Locks::mutator_lock_->SharedLock(self);
1026 if (kVerboseMode) {
1027 LOG(INFO) << "time=" << region_space_->Time();
1028 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
1029 LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
1030 }
1031 }
1032
1033 // Used to scan ref fields of an object.
1034 template <bool kHandleInterRegionRefs>
1035 class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
1036 public:
1037   explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
1038 size_t obj_region_idx)
1039 : collector_(collector),
1040 obj_region_idx_(obj_region_idx),
1041 contains_inter_region_idx_(false) {}
1042
1043   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1044 ALWAYS_INLINE
1045 REQUIRES_SHARED(Locks::mutator_lock_)
1046 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
1047 DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
1048 DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
1049 CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
1050 }
1051
1052   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
1053 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1054 DCHECK(klass->IsTypeOfReferenceClass());
1055     // If the referent is not null, then we must re-visit the object during the
1056     // copying phase to enqueue it for delayed processing and set its
1057     // read-barrier state to gray, to ensure that a call to GetReferent() triggers
1058     // the read barrier. We use the same data structure that is used to remember
1059     // objects with inter-region refs for this purpose too.
1060 if (kHandleInterRegionRefs
1061 && !contains_inter_region_idx_
1062 && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
1063 contains_inter_region_idx_ = true;
1064 }
1065 }
1066
1067   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1068 ALWAYS_INLINE
1069 REQUIRES_SHARED(Locks::mutator_lock_) {
1070 if (!root->IsNull()) {
1071 VisitRoot(root);
1072 }
1073 }
1074
1075   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1076 ALWAYS_INLINE
1077 REQUIRES_SHARED(Locks::mutator_lock_) {
1078 CheckReference(root->AsMirrorPtr());
1079 }
1080
1081   bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
1082 return contains_inter_region_idx_;
1083 }
1084
1085 private:
1086   void CheckReference(mirror::Object* ref) const
1087 REQUIRES_SHARED(Locks::mutator_lock_) {
1088 if (ref == nullptr) {
1089 // Nothing to do.
1090 return;
1091 }
1092 if (!collector_->TestAndSetMarkBitForRef(ref)) {
1093 collector_->PushOntoLocalMarkStack(ref);
1094 }
1095 if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
1096 size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
1097       // If a region-space object refers to an outside object, we will have a
1098       // mismatch of region idx, but the object need not be re-visited in the
1099       // copying phase.
1100 if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
1101 contains_inter_region_idx_ = true;
1102 }
1103 }
1104 }
1105
1106 ConcurrentCopying* const collector_;
1107 const size_t obj_region_idx_;
1108 mutable bool contains_inter_region_idx_;
1109 };
1110
1111 void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
1112 DCHECK(ref != nullptr);
1113 DCHECK(!immune_spaces_.ContainsObject(ref));
1114 DCHECK(TestMarkBitmapForRef(ref));
1115 size_t obj_region_idx = static_cast<size_t>(-1);
1116 if (LIKELY(region_space_->HasAddress(ref))) {
1117 obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
1118 // Add live bytes to the corresponding region
1119 if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
1120       // Newly allocated regions are always chosen for evacuation, so no need
1121       // to update live_bytes_.
1122 size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
1123 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1124 region_space_->AddLiveBytes(ref, alloc_size);
1125 }
1126 }
1127 ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
1128 visitor(this, obj_region_idx);
1129 ref->VisitReferences</*kVisitNativeRoots=*/ true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1130 visitor, visitor);
1131 // Mark the corresponding card dirty if the object contains any
1132 // inter-region reference.
1133 if (visitor.ContainsInterRegionRefs()) {
1134 if (obj_region_idx == static_cast<size_t>(-1)) {
1135       // If an inter-region ref has been found in a non-region-space, then it
1136       // must be in the non-moving space. This is because this function cannot be
1137       // called on an immune-space object, and a large-object-space object has
1138       // only a class object reference, which is either in some immune-space, or
1139       // in the non-moving space.
1140 DCHECK(heap_->non_moving_space_->HasAddress(ref));
1141 non_moving_space_inter_region_bitmap_->Set(ref);
1142 } else {
1143 region_space_inter_region_bitmap_->Set(ref);
1144 }
1145 }
1146 }
1147
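// Sets the mark bit for |ref| in the bitmap that covers it (region space, non-moving space, or
// large object space) and returns whether the bit was already set, i.e. whether |ref| was
// already marked. Immune-space objects are treated as always marked.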
1148 template <bool kAtomic>
1149 bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
1150 accounting::ContinuousSpaceBitmap* bitmap = nullptr;
1151 accounting::LargeObjectBitmap* los_bitmap = nullptr;
1152 if (LIKELY(region_space_->HasAddress(ref))) {
1153 bitmap = region_space_bitmap_;
1154 } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
1155 bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
1156 } else if (immune_spaces_.ContainsObject(ref)) {
1157 // References to immune space objects are always live.
1158 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
1159 return true;
1160 } else {
1161 // Should be a large object. Must be page aligned and the LOS must exist.
1162 if (kIsDebugBuild
1163 && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
1164 // It must be heap corruption. Remove memory protection and dump data.
1165 region_space_->Unprotect();
1166 heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
1167 MemberOffset(0),
1168 ref,
1169 /* fatal */ true);
1170 }
1171 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
1172 }
1173 if (kAtomic) {
1174 return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
1175 } else {
1176 return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
1177 }
1178 }
1179
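// Read-only query: returns whether |ref| is marked in the bitmap that covers it.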
1180 bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
1181 if (LIKELY(region_space_->HasAddress(ref))) {
1182 return region_space_bitmap_->Test(ref);
1183 } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
1184 return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
1185 } else if (immune_spaces_.ContainsObject(ref)) {
1186 // References to immune space objects are always live.
1187 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
1188 return true;
1189 } else {
1190 // Should be a large object. Must be page aligned and the LOS must exist.
1191 if (kIsDebugBuild
1192 && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
1193 // It must be heap corruption. Remove memory protection and dump data.
1194 region_space_->Unprotect();
1195 heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
1196 MemberOffset(0),
1197 ref,
1198 /* fatal */ true);
1199 }
1200 return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
1201 }
1202 }
1203
1204 void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
1205 if (kIsDebugBuild) {
1206 Thread *self = Thread::Current();
1207 DCHECK_EQ(thread_running_gc_, self);
1208 DCHECK(self->GetThreadLocalMarkStack() == nullptr);
1209 }
1210 DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
1211 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1212 ExpandGcMarkStack();
1213 }
1214 gc_mark_stack_->PushBack(ref);
1215 }
1216
1217 void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
1218   // Process the thread-local mark stacks containing thread roots.
1219 ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
1220 /* checkpoint_callback */ nullptr,
1221 [this] (mirror::Object* ref)
1222 REQUIRES_SHARED(Locks::mutator_lock_) {
1223 AddLiveBytesAndScanRef(ref);
1224 });
1225
1226 while (!gc_mark_stack_->IsEmpty()) {
1227 mirror::Object* ref = gc_mark_stack_->PopBack();
1228 AddLiveBytesAndScanRef(ref);
1229 }
1230 }
1231
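// Visits an immune-space object during the marking phase and pushes its unmarked referents onto
// the local mark stack (inter-region tracking is not needed for immune objects).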
1232 class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
1233 public:
1234   explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
1235
1236   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
1237 ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false>
1238 visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1));
1239 obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1240 visitor, visitor);
1241 }
1242
1243   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
1244     reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
1245 }
1246
1247 private:
1248 ConcurrentCopying* const collector_;
1249 };
1250
1251 /* Invariants for two-phase CC
1252 * ===========================
1253 * A) Definitions
1254 * ---------------
1255 * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
1256 * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
1257 * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
1258 * 4) Gray: marked in bitmap, and exists in mark stack
1259 * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
1260 * dirty, and exists in mark stack
1261 * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
1262 *
1263 * B) Before marking phase
1264 * -----------------------
1265 * 1) All objects are white
1266 * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
1267 * 3) Mark bitmap is cleared
1268 * 4) Mark stack is empty
1269 *
1270 * C) During marking phase
1271 * ------------------------
1272 * 1) If a black object holds an inter-region or white reference, then its
1273 * corresponding card is dirty. In other words, it changes from being
1274 * black-clean to black-dirty
1275 * 2) No black-clean object points to a white object
1276 *
1277 * D) After marking phase
1278 * -----------------------
1279 * 1) There are no gray objects
1280 * 2) All newly allocated objects are in from space
1281 * 3) No white object can be reachable, directly or otherwise, from a
1282 * black-clean object
1283 *
1284 * E) During copying phase
1285 * ------------------------
1286 * 1) Mutators cannot observe white and black-dirty objects
1287 * 2) New allocations are in to-space (newly allocated regions are part of to-space)
1288 * 3) An object in mark stack must have its rb_state = Gray
1289 *
1290 * F) During card table scan
1291 * --------------------------
1292 * 1) Referents corresponding to root references are gray or in to-space
1293 * 2) Every path from an object that is read or written by a mutator during
1294 * this period to a dirty black object goes through some gray object.
1295 * Mutators preserve this by graying black objects as needed during this
1296 * period. Ensures that a mutator never encounters a black dirty object.
1297 *
1298 * G) After card table scan
1299 * ------------------------
1300 * 1) There are no black-dirty objects
1301 * 2) Referents corresponding to root references are gray, black-clean or in
1302 * to-space
1303 *
1304 * H) After copying phase
1305 * -----------------------
1306 * 1) Mark stack is empty
1307 * 2) No references into evacuated from-space
1308 * 3) No reference to an object which is unmarked and is also not in newly
1309 * allocated region. In other words, no reference to white objects.
1310 */
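// Illustrative sketch (not part of the collector): a minimal model of how the color definitions
// in (A) above combine the mark bit, mark-stack membership and card state. It ignores rb_state
// and the gray-dirty case for brevity; all names below are hypothetical.
//
//   enum class Color { kWhite, kGray, kBlackClean, kBlackDirty };
//
//   Color Classify(bool marked_in_bitmap, bool in_mark_stack, bool card_dirty) {
//     if (!marked_in_bitmap) return Color::kWhite;                    // (A.6)
//     if (in_mark_stack) return Color::kGray;                         // (A.4)
//     return card_dirty ? Color::kBlackDirty : Color::kBlackClean;    // (A.3) / (A.2)
//   }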
1311
1312 void ConcurrentCopying::MarkingPhase() {
1313 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
1314 if (kVerboseMode) {
1315 LOG(INFO) << "GC MarkingPhase";
1316 }
1317 accounting::CardTable* const card_table = heap_->GetCardTable();
1318 Thread* const self = Thread::Current();
1319 // Clear live_bytes_ of every non-free region, except the ones that are newly
1320 // allocated.
1321 region_space_->SetAllRegionLiveBytesZero();
1322 if (kIsDebugBuild) {
1323 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
1324 }
1325 // Scan immune spaces
1326 {
1327 TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
1328 for (auto& space : immune_spaces_.GetSpaces()) {
1329 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
1330 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1331 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
1332 ImmuneSpaceCaptureRefsVisitor visitor(this);
1333 if (table != nullptr) {
1334 table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
1335 } else {
1336 WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
1337 card_table->Scan<false>(
1338 live_bitmap,
1339 space->Begin(),
1340 space->Limit(),
1341 visitor,
1342 accounting::CardTable::kCardDirty - 1);
1343 }
1344 }
1345 }
1346 // Scan runtime roots
1347 {
1348 TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
1349 CaptureRootsForMarkingVisitor visitor(this, self);
1350 Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
1351 }
1352 {
1353 // TODO: don't visit the transaction roots if it's not active.
1354 TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
1355 CaptureRootsForMarkingVisitor visitor(this, self);
1356 Runtime::Current()->VisitNonThreadRoots(&visitor);
1357 }
1358 // Capture thread roots
1359 CaptureThreadRootsForMarking();
1360 // Process mark stack
1361 ProcessMarkStackForMarkingAndComputeLiveBytes();
1362
1363 if (kVerboseMode) {
1364 LOG(INFO) << "GC end of MarkingPhase";
1365 }
1366 }
1367
1368 template <bool kNoUnEvac>
1369 void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
1370 Scan<kNoUnEvac>(obj);
1371 // Set the read-barrier state of a reference-type object to gray if its
1372 // referent is not marked yet. This is to ensure that if GetReferent() is
1373 // called, it triggers the read-barrier to process the referent before use.
1374 if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) {
1375 mirror::Object* referent =
1376 obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>();
1377 if (referent != nullptr && !IsInToSpace(referent)) {
1378 obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
1379 }
1380 }
1381 }
1382
1383 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
1384 void ConcurrentCopying::CopyingPhase() {
1385 TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
1386 if (kVerboseMode) {
1387 LOG(INFO) << "GC CopyingPhase";
1388 }
1389 Thread* self = Thread::Current();
1390 accounting::CardTable* const card_table = heap_->GetCardTable();
1391 if (kIsDebugBuild) {
1392 MutexLock mu(self, *Locks::thread_list_lock_);
1393 CHECK(weak_ref_access_enabled_);
1394 }
1395
1396 // Scan immune spaces.
1397 // Update all the fields in the immune spaces first without graying the objects so that we
1398 // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
1399 // of the objects.
1400 if (kUseBakerReadBarrier) {
1401 gc_grays_immune_objects_ = false;
1402 }
1403 if (use_generational_cc_) {
1404 if (kVerboseMode) {
1405 LOG(INFO) << "GC ScanCardsForSpace";
1406 }
1407 TimingLogger::ScopedTiming split2("ScanCardsForSpace", GetTimings());
1408 WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
1409 CHECK(!done_scanning_.load(std::memory_order_relaxed));
1410 if (kIsDebugBuild) {
1411 // Leave some time for mutators to race ahead to try and find races between the GC card
1412 // scanning and mutators reading references.
1413 usleep(10 * 1000);
1414 }
1415 for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
1416 if (space->IsImageSpace() || space->IsZygoteSpace()) {
1417 // Image and zygote spaces are already handled since we gray the objects in the pause.
1418 continue;
1419 }
1420 // Scan all of the objects on dirty cards in unevac from space, and non moving space. These
1421 // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things
1422 // in the from space.
1423 //
1424 // Note that we do not need to process the large-object space (the only discontinuous space)
1425 // as it contains only large string objects and large primitive array objects, that have no
1426 // reference to other objects, except their class. There is no need to scan these large
1427 // objects, as the String class and the primitive array classes are expected to never move
1428 // during a collection:
1429 // - In the case where we run with a boot image, these classes are part of the image space,
1430 // which is an immune space.
1431 // - In the case where we run without a boot image, these classes are allocated in the
1432 // non-moving space (see art::ClassLinker::InitWithoutImage).
1433 card_table->Scan<false>(
1434 space->GetMarkBitmap(),
1435 space->Begin(),
1436 space->End(),
1437 [this, space](mirror::Object* obj)
1438 REQUIRES(Locks::heap_bitmap_lock_)
1439 REQUIRES_SHARED(Locks::mutator_lock_) {
1440 // TODO: This code may be refactored to avoid scanning object while
1441 // done_scanning_ is false by setting rb_state to gray, and pushing the
1442 // object on mark stack. However, it will also require clearing the
1443 // corresponding mark-bit and, for region space objects,
1444 // decrementing the object's size from the corresponding region's
1445 // live_bytes.
1446 if (young_gen_) {
1447 // Don't push or gray unevac refs.
1448 if (kIsDebugBuild && space == region_space_) {
1449 // We may get unevac large objects.
1450 if (!region_space_->IsInUnevacFromSpace(obj)) {
1451 CHECK(region_space_bitmap_->Test(obj));
1452 region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
1453 LOG(FATAL) << "Scanning " << obj << " not in unevac space";
1454 }
1455 }
1456 ScanDirtyObject</*kNoUnEvac*/ true>(obj);
1457 } else if (space != region_space_) {
1458 DCHECK(space == heap_->non_moving_space_);
1459 // We need to process un-evac references as they may be unprocessed,
1460 // if they skipped the marking phase due to heap mutation.
1461 ScanDirtyObject</*kNoUnEvac*/ false>(obj);
1462 non_moving_space_inter_region_bitmap_->Clear(obj);
1463 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1464 ScanDirtyObject</*kNoUnEvac*/ false>(obj);
1465 region_space_inter_region_bitmap_->Clear(obj);
1466 }
1467 },
1468 accounting::CardTable::kCardAged);
1469
1470 if (!young_gen_) {
1471 auto visitor = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1472 // We don't need to process un-evac references as any unprocessed
1473 // ones will be taken care of in the card-table scan above.
1474 ScanDirtyObject</*kNoUnEvac*/ true>(obj);
1475 };
1476 if (space == region_space_) {
1477 region_space_->ScanUnevacFromSpace(region_space_inter_region_bitmap_.get(), visitor);
1478 } else {
1479 DCHECK(space == heap_->non_moving_space_);
1480 non_moving_space_inter_region_bitmap_->VisitMarkedRange(
1481 reinterpret_cast<uintptr_t>(space->Begin()),
1482 reinterpret_cast<uintptr_t>(space->End()),
1483 visitor);
1484 }
1485 }
1486 }
1487 // Done scanning unevac space.
1488 done_scanning_.store(true, std::memory_order_release);
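// The release ordering above publishes the card-scan work: any thread that later observes
// done_scanning_ == true via an acquire load also sees the effects of the scan.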
1489 // NOTE: inter-region-ref bitmaps can be cleared here to release memory, if needed.
1490 // Currently we do it in ReclaimPhase().
1491 if (kVerboseMode) {
1492 LOG(INFO) << "GC end of ScanCardsForSpace";
1493 }
1494 }
1495 {
1496 // For a sticky-bit collection, this phase needs to be after the card scanning since the
1497 // mutator may read an unevac space object out of an image object. If the image object is no
1498 // longer gray it will trigger a read barrier for the unevac space object.
1499 TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
1500 for (auto& space : immune_spaces_.GetSpaces()) {
1501 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
1502 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1503 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
1504 ImmuneSpaceScanObjVisitor visitor(this);
1505 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
1506 table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
1507 } else {
1508 WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
1509 card_table->Scan<false>(
1510 live_bitmap,
1511 space->Begin(),
1512 space->Limit(),
1513 visitor,
1514 accounting::CardTable::kCardDirty - 1);
1515 }
1516 }
1517 }
1518 if (kUseBakerReadBarrier) {
1519 // This release fence makes the field updates in the above loop visible before allowing mutators
1520 // to access immune objects without graying them first.
1521 updated_all_immune_objects_.store(true, std::memory_order_release);
1522 // Now "un-gray" (conceptually blacken) immune objects concurrently accessed and grayed by
1523 // mutators. We can't do this in the above loop because we would incorrectly disable the read
1524 // barrier by un-graying (conceptually blackening) an object which may point to an unscanned,
1525 // white object, breaking the to-space invariant (a mutator shall never observe a from-space
1526 // (white) object).
1527 //
1528 // Make sure no mutators are in the middle of marking an immune object before un-graying
1529 // (blackening) immune objects.
1530 IssueEmptyCheckpoint();
1531 MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
1532 if (kVerboseMode) {
1533 LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
1534 }
1535 for (mirror::Object* obj : immune_gray_stack_) {
1536 DCHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::GrayState());
1537 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
1538 ReadBarrier::NonGrayState());
1539 DCHECK(success);
1540 }
1541 immune_gray_stack_.clear();
1542 }
1543
1544 {
1545 TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
1546 Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
1547 }
1548 {
1549 // TODO: don't visit the transaction roots if it's not active.
1550 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
1551 Runtime::Current()->VisitNonThreadRoots(this);
1552 }
1553
1554 {
1555 TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
1556 // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
1557 // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
1558 // that after we disable weak ref accesses we can't use a checkpoint due to a deadlock issue
1559 // (running threads may be blocked at WaitHoldingLocks), and that once we reach the point
1560 // where we process weak references, we can avoid using a lock when accessing the GC mark
1561 // stack, which makes mark stack processing more efficient.
1562
1563 // Process the mark stack once in the thread local stack mode. This marks most of the live
1564 // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and system
1565 // weaks) that may happen concurrently while we are processing the mark stack and that newly
1566 // mark/gray objects and push refs on the mark stack.
1567 ProcessMarkStack();
1568 // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
1569 // for the last time before transitioning to the shared mark stack mode, which would process new
1570 // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
1571 // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
1572 // important to do these together in a single checkpoint so that we can ensure that mutators
1573 // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
1574 // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
1575 // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
1576 // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
1577 SwitchToSharedMarkStackMode();
1578 CHECK(!self->GetWeakRefAccessEnabled());
1579 // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
1580 // (which may be non-empty if there were refs found on thread-local mark stacks during the above
1581 // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
1582 // (via read barriers) have no way to produce any more refs to process. Marking converges once
1583 // before we process weak refs below.
1584 ProcessMarkStack();
1585 CheckEmptyMarkStack();
1586 // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
1587 // lock from this point on.
1588 SwitchToGcExclusiveMarkStackMode();
1589 CheckEmptyMarkStack();
1590 if (kVerboseMode) {
1591 LOG(INFO) << "ProcessReferences";
1592 }
1593 // Process weak references. This may produce new refs to process and have them processed via
1594 // ProcessMarkStack (in the GC exclusive mark stack mode).
1595 ProcessReferences(self);
1596 CheckEmptyMarkStack();
1597 if (kVerboseMode) {
1598 LOG(INFO) << "SweepSystemWeaks";
1599 }
1600 SweepSystemWeaks(self);
1601 if (kVerboseMode) {
1602 LOG(INFO) << "SweepSystemWeaks done";
1603 }
1604 // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
1605 // marked some objects (kept some strings alive), as hash_set::Erase() can call the hash function for
1606 // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
1607 ProcessMarkStack();
1608 CheckEmptyMarkStack();
1609 // Re-enable weak ref accesses.
1610 ReenableWeakRefAccess(self);
1611 // Free data for class loaders that we unloaded.
1612 Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
1613 // Marking is done. Disable marking.
1614 DisableMarking();
1615 CheckEmptyMarkStack();
1616 }
1617
1618 if (kIsDebugBuild) {
1619 MutexLock mu(self, *Locks::thread_list_lock_);
1620 CHECK(weak_ref_access_enabled_);
1621 }
1622 if (kVerboseMode) {
1623 LOG(INFO) << "GC end of CopyingPhase";
1624 }
1625 }
1626
1627 void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
1628 if (kVerboseMode) {
1629 LOG(INFO) << "ReenableWeakRefAccess";
1630 }
1631 // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
1632 {
1633 MutexLock mu(self, *Locks::thread_list_lock_);
1634 weak_ref_access_enabled_ = true; // This is for new threads.
1635 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1636 for (Thread* thread : thread_list) {
1637 thread->SetWeakRefAccessEnabled(true);
1638 }
1639 }
1640 // Unblock blocking threads.
1641 GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
1642 Runtime::Current()->BroadcastForNewSystemWeaks();
1643 }
1644
1645 class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
1646 public:
1647 explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
1648 : concurrent_copying_(concurrent_copying) {
1649 }
1650
1651 void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
1652 // Note: self is not necessarily equal to thread since thread may be suspended.
1653 Thread* self = Thread::Current();
1654 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1655 << thread->GetState() << " thread " << thread << " self " << self;
1656 // Disable the thread-local is_gc_marking flag.
1657 // Note a thread that has just started right before this checkpoint may already have this flag
1658 // set to false, which is ok.
1659 thread->SetIsGcMarkingAndUpdateEntrypoints(false);
1660 // If thread is a running mutator, then act on behalf of the garbage collector.
1661 // See the code in ThreadList::RunCheckpoint.
1662 concurrent_copying_->GetBarrier().Pass(self);
1663 }
1664
1665 private:
1666 ConcurrentCopying* const concurrent_copying_;
1667 };
1668
1669 class ConcurrentCopying::DisableMarkingCallback : public Closure {
1670 public:
1671 explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
1672 : concurrent_copying_(concurrent_copying) {
1673 }
1674
1675 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
1676 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
1677 // to avoid a race with ThreadList::Register().
1678 CHECK(concurrent_copying_->is_marking_);
1679 concurrent_copying_->is_marking_ = false;
1680 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
1681 CHECK(concurrent_copying_->is_using_read_barrier_entrypoints_);
1682 concurrent_copying_->is_using_read_barrier_entrypoints_ = false;
1683 } else {
1684 CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
1685 }
1686 }
1687
1688 private:
1689 ConcurrentCopying* const concurrent_copying_;
1690 };
1691
1692 void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
1693 Thread* self = Thread::Current();
1694 DisableMarkingCheckpoint check_point(this);
1695 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1696 gc_barrier_->Init(self, 0);
1697 DisableMarkingCallback dmc(this);
1698 size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
1699 // If there are no threads to wait for, which implies that all the checkpoint functions have
1700 // finished, then there is no need to release the mutator lock.
1701 if (barrier_count == 0) {
1702 return;
1703 }
1704 // Release locks then wait for all mutator threads to pass the barrier.
1705 Locks::mutator_lock_->SharedUnlock(self);
1706 {
1707 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1708 gc_barrier_->Increment(self, barrier_count);
1709 }
1710 Locks::mutator_lock_->SharedLock(self);
1711 }
1712
1713 void ConcurrentCopying::DisableMarking() {
1714 // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
1715 // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
1716 // cached in a local variable.
1717 IssueDisableMarkingCheckpoint();
1718 if (kUseTableLookupReadBarrier) {
1719 heap_->rb_table_->ClearAll();
1720 DCHECK(heap_->rb_table_->IsAllCleared());
1721 }
1722 is_mark_stack_push_disallowed_.store(1, std::memory_order_seq_cst);
1723 mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
1724 }
1725
1726 void ConcurrentCopying::IssueEmptyCheckpoint() {
1727 Thread* self = Thread::Current();
1728 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1729 // Release locks then wait for all mutator threads to pass the barrier.
1730 Locks::mutator_lock_->SharedUnlock(self);
1731 thread_list->RunEmptyCheckpoint();
1732 Locks::mutator_lock_->SharedLock(self);
1733 }
1734
1735 void ConcurrentCopying::ExpandGcMarkStack() {
1736 DCHECK(gc_mark_stack_->IsFull());
1737 const size_t new_size = gc_mark_stack_->Capacity() * 2;
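// Growing the stack discards its current contents, so stash the existing entries and re-push
// them once the stack has been resized to the doubled capacity.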
1738 std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
1739 gc_mark_stack_->End());
1740 gc_mark_stack_->Resize(new_size);
1741 for (auto& ref : temp) {
1742 gc_mark_stack_->PushBack(ref.AsMirrorPtr());
1743 }
1744 DCHECK(!gc_mark_stack_->IsFull());
1745 }
1746
1747 void ConcurrentCopying::PushOntoMarkStack(Thread* const self, mirror::Object* to_ref) {
1748 CHECK_EQ(is_mark_stack_push_disallowed_.load(std::memory_order_relaxed), 0)
1749 << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
1750 CHECK(thread_running_gc_ != nullptr);
1751 MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
1752 if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
1753 if (LIKELY(self == thread_running_gc_)) {
1754 // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
1755 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1756 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1757 ExpandGcMarkStack();
1758 }
1759 gc_mark_stack_->PushBack(to_ref);
1760 } else {
1761 // Otherwise, use a thread-local mark stack.
1762 accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
1763 if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
1764 MutexLock mu(self, mark_stack_lock_);
1765 // Get a new thread local mark stack.
1766 accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
1767 if (!pooled_mark_stacks_.empty()) {
1768 // Use a pooled mark stack.
1769 new_tl_mark_stack = pooled_mark_stacks_.back();
1770 pooled_mark_stacks_.pop_back();
1771 } else {
1772 // None pooled. Create a new one.
1773 new_tl_mark_stack =
1774 accounting::AtomicStack<mirror::Object>::Create(
1775 "thread local mark stack", 4 * KB, 4 * KB);
1776 }
1777 DCHECK(new_tl_mark_stack != nullptr);
1778 DCHECK(new_tl_mark_stack->IsEmpty());
1779 new_tl_mark_stack->PushBack(to_ref);
1780 self->SetThreadLocalMarkStack(new_tl_mark_stack);
1781 if (tl_mark_stack != nullptr) {
1782 // Store the old full stack into a vector.
1783 revoked_mark_stacks_.push_back(tl_mark_stack);
1784 }
1785 } else {
1786 tl_mark_stack->PushBack(to_ref);
1787 }
1788 }
1789 } else if (mark_stack_mode == kMarkStackModeShared) {
1790 // Access the shared GC mark stack with a lock.
1791 MutexLock mu(self, mark_stack_lock_);
1792 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1793 ExpandGcMarkStack();
1794 }
1795 gc_mark_stack_->PushBack(to_ref);
1796 } else {
1797 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1798 static_cast<uint32_t>(kMarkStackModeGcExclusive))
1799 << "ref=" << to_ref
1800 << " self->gc_marking=" << self->GetIsGcMarking()
1801 << " cc->is_marking=" << is_marking_;
1802 CHECK(self == thread_running_gc_)
1803 << "Only GC-running thread should access the mark stack "
1804 << "in the GC exclusive mark stack mode";
1805 // Access the GC mark stack without a lock.
1806 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1807 ExpandGcMarkStack();
1808 }
1809 gc_mark_stack_->PushBack(to_ref);
1810 }
1811 }
1812
1813 accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
1814 return heap_->allocation_stack_.get();
1815 }
1816
1817 accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
1818 return heap_->live_stack_.get();
1819 }
1820
1821 // The following visitors are used to verify that there are no references to the from-space
1822 // left after marking.
1823 class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
1824 public:
1825 explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
1826 : collector_(collector) {}
1827
1828 void operator()(mirror::Object* ref,
1829 MemberOffset offset = MemberOffset(0),
1830 mirror::Object* holder = nullptr) const
1831 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1832 if (ref == nullptr) {
1833 // OK.
1834 return;
1835 }
1836 collector_->AssertToSpaceInvariant(holder, offset, ref);
1837 if (kUseBakerReadBarrier) {
1838 CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState())
1839 << "Ref " << ref << " " << ref->PrettyTypeOf() << " has gray rb_state";
1840 }
1841 }
1842
1843 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
1844 override REQUIRES_SHARED(Locks::mutator_lock_) {
1845 DCHECK(root != nullptr);
1846 operator()(root);
1847 }
1848
1849 private:
1850 ConcurrentCopying* const collector_;
1851 };
1852
1853 class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
1854 public:
1855 explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
1856 : collector_(collector) {}
1857
1858 void operator()(ObjPtr<mirror::Object> obj,
1859 MemberOffset offset,
1860 bool is_static ATTRIBUTE_UNUSED) const
1861 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1862 mirror::Object* ref =
1863 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
1864 VerifyNoFromSpaceRefsVisitor visitor(collector_);
1865 visitor(ref, offset, obj.Ptr());
1866 }
1867 void operator()(ObjPtr<mirror::Class> klass,
1868 ObjPtr<mirror::Reference> ref) const
1869 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1870 CHECK(klass->IsTypeOfReferenceClass());
1871 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
1872 }
1873
1874 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1875 REQUIRES_SHARED(Locks::mutator_lock_) {
1876 if (!root->IsNull()) {
1877 VisitRoot(root);
1878 }
1879 }
1880
1881 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1882 REQUIRES_SHARED(Locks::mutator_lock_) {
1883 VerifyNoFromSpaceRefsVisitor visitor(collector_);
1884 visitor(root->AsMirrorPtr());
1885 }
1886
1887 private:
1888 ConcurrentCopying* const collector_;
1889 };
1890
1891 // Verify there are no from-space references left after the marking phase.
1892 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
1893 Thread* self = Thread::Current();
1894 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
1895 // Verify that all threads have is_gc_marking set to false.
1896 {
1897 MutexLock mu(self, *Locks::thread_list_lock_);
1898 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1899 for (Thread* thread : thread_list) {
1900 CHECK(!thread->GetIsGcMarking());
1901 }
1902 }
1903
1904 auto verify_no_from_space_refs_visitor = [&](mirror::Object* obj)
1905 REQUIRES_SHARED(Locks::mutator_lock_) {
1906 CHECK(obj != nullptr);
1907 space::RegionSpace* region_space = RegionSpace();
1908 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1909 VerifyNoFromSpaceRefsFieldVisitor visitor(this);
1910 obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1911 visitor,
1912 visitor);
1913 if (kUseBakerReadBarrier) {
1914 CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::NonGrayState())
1915 << "obj=" << obj << " has gray rb_state " << obj->GetReadBarrierState();
1916 }
1917 };
1918 // Roots.
1919 {
1920 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1921 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
1922 Runtime::Current()->VisitRoots(&ref_visitor);
1923 }
1924 // The to-space.
1925 region_space_->WalkToSpace(verify_no_from_space_refs_visitor);
1926 // Non-moving spaces.
1927 {
1928 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1929 heap_->GetMarkBitmap()->Visit(verify_no_from_space_refs_visitor);
1930 }
1931 // The alloc stack.
1932 {
1933 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
1934 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
1935 it < end; ++it) {
1936 mirror::Object* const obj = it->AsMirrorPtr();
1937 if (obj != nullptr && obj->GetClass() != nullptr) {
1938 // TODO: need to call this only if obj is alive?
1939 ref_visitor(obj);
1940 verify_no_from_space_refs_visitor(obj);
1941 }
1942 }
1943 }
1944 // TODO: LOS. But only refs in LOS are classes.
1945 }
1946
1947 // The following visitors are used to assert the to-space invariant.
1948 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
1949 public:
1950 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
1951 : collector_(collector) {}
1952
1953 void operator()(ObjPtr<mirror::Object> obj,
1954 MemberOffset offset,
1955 bool is_static ATTRIBUTE_UNUSED) const
1956 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1957 mirror::Object* ref =
1958 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
1959 collector_->AssertToSpaceInvariant(obj.Ptr(), offset, ref);
1960 }
1961 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
1962 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1963 CHECK(klass->IsTypeOfReferenceClass());
1964 }
1965
1966 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1967 REQUIRES_SHARED(Locks::mutator_lock_) {
1968 if (!root->IsNull()) {
1969 VisitRoot(root);
1970 }
1971 }
1972
1973 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1974 REQUIRES_SHARED(Locks::mutator_lock_) {
1975 mirror::Object* ref = root->AsMirrorPtr();
1976 collector_->AssertToSpaceInvariant(/* obj */ nullptr, MemberOffset(0), ref);
1977 }
1978
1979 private:
1980 ConcurrentCopying* const collector_;
1981 };
1982
1983 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
1984 Closure* checkpoint_callback) {
1985 Thread* self = Thread::Current();
1986 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1987 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1988 gc_barrier_->Init(self, 0);
1989 size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
1990 // If there are no threads to wait for, which implies that all the checkpoint functions have
1991 // finished, then there is no need to release the mutator lock.
1992 if (barrier_count == 0) {
1993 return;
1994 }
1995 Locks::mutator_lock_->SharedUnlock(self);
1996 {
1997 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1998 gc_barrier_->Increment(self, barrier_count);
1999 }
2000 Locks::mutator_lock_->SharedLock(self);
2001 }
2002
2003 void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
2004 Thread* self = Thread::Current();
2005 CHECK_EQ(self, thread);
2006 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
2007 if (tl_mark_stack != nullptr) {
2008 CHECK(is_marking_);
2009 MutexLock mu(self, mark_stack_lock_);
2010 revoked_mark_stacks_.push_back(tl_mark_stack);
2011 thread->SetThreadLocalMarkStack(nullptr);
2012 }
2013 }
2014
2015 void ConcurrentCopying::ProcessMarkStack() {
2016 if (kVerboseMode) {
2017 LOG(INFO) << "ProcessMarkStack. ";
2018 }
2019 bool empty_prev = false;
2020 while (true) {
2021 bool empty = ProcessMarkStackOnce();
2022 if (empty_prev && empty) {
2023 // Saw empty mark stack for a second time, done.
2024 break;
2025 }
2026 empty_prev = empty;
2027 }
2028 }
2029
2030 bool ConcurrentCopying::ProcessMarkStackOnce() {
2031 DCHECK(thread_running_gc_ != nullptr);
2032 Thread* const self = Thread::Current();
2033 DCHECK(self == thread_running_gc_);
2034 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2035 size_t count = 0;
2036 MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2037 if (mark_stack_mode == kMarkStackModeThreadLocal) {
2038 // Process the thread-local mark stacks and the GC mark stack.
2039 count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
2040 /* checkpoint_callback= */ nullptr,
2041 [this] (mirror::Object* ref)
2042 REQUIRES_SHARED(Locks::mutator_lock_) {
2043 ProcessMarkStackRef(ref);
2044 });
2045 while (!gc_mark_stack_->IsEmpty()) {
2046 mirror::Object* to_ref = gc_mark_stack_->PopBack();
2047 ProcessMarkStackRef(to_ref);
2048 ++count;
2049 }
2050 gc_mark_stack_->Reset();
2051 } else if (mark_stack_mode == kMarkStackModeShared) {
2052 // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
2053 // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
2054 // disabled at this point.
2055 IssueEmptyCheckpoint();
2056 // Process the shared GC mark stack with a lock.
2057 {
2058 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2059 CHECK(revoked_mark_stacks_.empty());
2060 }
2061 while (true) {
2062 std::vector<mirror::Object*> refs;
2063 {
2064 // Copy refs with lock. Note the number of refs should be small.
2065 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2066 if (gc_mark_stack_->IsEmpty()) {
2067 break;
2068 }
2069 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
2070 p != gc_mark_stack_->End(); ++p) {
2071 refs.push_back(p->AsMirrorPtr());
2072 }
2073 gc_mark_stack_->Reset();
2074 }
2075 for (mirror::Object* ref : refs) {
2076 ProcessMarkStackRef(ref);
2077 ++count;
2078 }
2079 }
2080 } else {
2081 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
2082 static_cast<uint32_t>(kMarkStackModeGcExclusive));
2083 {
2084 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2085 CHECK(revoked_mark_stacks_.empty());
2086 }
2087 // Process the GC mark stack in the exclusive mode. No need to take the lock.
2088 while (!gc_mark_stack_->IsEmpty()) {
2089 mirror::Object* to_ref = gc_mark_stack_->PopBack();
2090 ProcessMarkStackRef(to_ref);
2091 ++count;
2092 }
2093 gc_mark_stack_->Reset();
2094 }
2095
2096 // Return true if the stack was empty.
2097 return count == 0;
2098 }
2099
2100 template <typename Processor>
2101 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
2102 Closure* checkpoint_callback,
2103 const Processor& processor) {
2104 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
2105 RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
2106 size_t count = 0;
2107 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
2108 {
2109 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2110 // Make a copy of the mark stack vector.
2111 mark_stacks = revoked_mark_stacks_;
2112 revoked_mark_stacks_.clear();
2113 }
2114 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
2115 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
2116 mirror::Object* to_ref = p->AsMirrorPtr();
2117 processor(to_ref);
2118 ++count;
2119 }
2120 {
2121 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2122 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
2123 // The pool has enough. Delete it.
2124 delete mark_stack;
2125 } else {
2126 // Otherwise, put it into the pool for later reuse.
2127 mark_stack->Reset();
2128 pooled_mark_stacks_.push_back(mark_stack);
2129 }
2130 }
2131 }
2132 return count;
2133 }
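// Callers supply the per-reference work as a lambda; for example, the thread-local mode of
// ProcessMarkStackOnce() above invokes this as (illustrative restatement of that call site):
//
//   ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
//                                /* checkpoint_callback= */ nullptr,
//                                [this] (mirror::Object* ref)
//                                    REQUIRES_SHARED(Locks::mutator_lock_) {
//                                  ProcessMarkStackRef(ref);
//                                });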
2134
2135 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
2136 DCHECK(!region_space_->IsInFromSpace(to_ref));
2137 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
2138 if (kUseBakerReadBarrier) {
2139 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
2140 << " to_ref=" << to_ref
2141 << " rb_state=" << to_ref->GetReadBarrierState()
2142 << " is_marked=" << IsMarked(to_ref)
2143 << " type=" << to_ref->PrettyTypeOf()
2144 << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
2145 << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
2146 << " region_type=" << rtype
2147 // TODO: Temporary; remove this when this is no longer needed (b/116087961).
2148 << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
2149 }
2150 bool add_to_live_bytes = false;
2151 // Invariant: There should be no object from a newly-allocated
2152 // region (either large or non-large) on the mark stack.
2153 DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
2154 bool perform_scan = false;
2155 switch (rtype) {
2156 case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
2157 // Mark the bitmap only in the GC thread here so that we don't need a CAS.
2158 if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
2159 // It may already be marked if we accidentally pushed the same object twice due to the racy
2160 // bitmap read in MarkUnevacFromSpaceRegion.
2161 if (use_generational_cc_ && young_gen_) {
2162 CHECK(region_space_->IsLargeObject(to_ref));
2163 region_space_->ZeroLiveBytesForLargeObject(to_ref);
2164 }
2165 perform_scan = true;
2166 // Only add to the live bytes if the object was not already marked and we are not the young
2167 // GC.
2168 // Why add live bytes even after 2-phase GC?
2169 // We need to ensure that if there is an unevac region with any live
2170 // objects, then its live_bytes must be non-zero. Otherwise,
2171 // ClearFromSpace() will clear the region. Considering that we may skip
2172 // live objects during marking phase of 2-phase GC, we have to take care
2173 // of such objects here.
2174 add_to_live_bytes = true;
2175 }
2176 break;
2177 case space::RegionSpace::RegionType::kRegionTypeToSpace:
2178 if (use_generational_cc_) {
2179 // Copied to to-space, set the bit so that the next GC can scan objects.
2180 region_space_bitmap_->Set(to_ref);
2181 }
2182 perform_scan = true;
2183 break;
2184 default:
2185 DCHECK(!region_space_->HasAddress(to_ref)) << to_ref;
2186 DCHECK(!immune_spaces_.ContainsObject(to_ref));
2187 // Non-moving or large-object space.
2188 if (kUseBakerReadBarrier) {
2189 accounting::ContinuousSpaceBitmap* mark_bitmap =
2190 heap_->GetNonMovingSpace()->GetMarkBitmap();
2191 const bool is_los = !mark_bitmap->HasAddress(to_ref);
2192 if (is_los) {
2193 if (!IsAligned<kPageSize>(to_ref)) {
2194 // Ref is a large object that is not aligned, it must be heap
2195 // corruption. Remove memory protection and dump data before
2196 // AtomicSetReadBarrierState since it will fault if the address is not
2197 // valid.
2198 region_space_->Unprotect();
2199 heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
2200 MemberOffset(0),
2201 to_ref,
2202 /* fatal */ true);
2203 }
2204 DCHECK(heap_->GetLargeObjectsSpace())
2205 << "ref=" << to_ref
2206 << " doesn't belong to non-moving space and large object space doesn't exist";
2207 accounting::LargeObjectBitmap* los_bitmap =
2208 heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2209 DCHECK(los_bitmap->HasAddress(to_ref));
2210 // Only the GC thread could be setting the LOS bitmap, hence this does
2211 // not need to be done atomically.
2212 perform_scan = !los_bitmap->Set(to_ref);
2213 } else {
2214 // Only the GC thread could be setting the non-moving space bitmap,
2215 // hence this does not need to be done atomically.
2216 perform_scan = !mark_bitmap->Set(to_ref);
2217 }
2218 } else {
2219 perform_scan = true;
2220 }
2221 }
2222 if (perform_scan) {
2223 if (use_generational_cc_ && young_gen_) {
2224 Scan<true>(to_ref);
2225 } else {
2226 Scan<false>(to_ref);
2227 }
2228 }
2229 if (kUseBakerReadBarrier) {
2230 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
2231 << " to_ref=" << to_ref
2232 << " rb_state=" << to_ref->GetReadBarrierState()
2233 << " is_marked=" << IsMarked(to_ref)
2234 << " type=" << to_ref->PrettyTypeOf()
2235 << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
2236 << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
2237 << " region_type=" << rtype
2238 // TODO: Temporary; remove this when this is no longer needed (b/116087961).
2239 << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
2240 }
2241 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
2242 mirror::Object* referent = nullptr;
2243 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
2244 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
2245 !IsInToSpace(referent)))) {
2246 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
2247 // will change it to non-gray later in ReferenceQueue::DisableReadBarrierForReference.
2248 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
2249 << "Left unenqueued ref gray " << to_ref;
2250 } else {
2251 // We may occasionally leave a reference non-gray in the queue if its referent happens to be
2252 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
2253 // above IsInToSpace() evaluates to true and we change the color from gray to non-gray here in
2254 // this else block.
2255 if (kUseBakerReadBarrier) {
2256 bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
2257 ReadBarrier::GrayState(),
2258 ReadBarrier::NonGrayState());
2259 DCHECK(success) << "Must succeed as we won the race.";
2260 }
2261 }
2262 #else
2263 DCHECK(!kUseBakerReadBarrier);
2264 #endif
2265
2266 if (add_to_live_bytes) {
2267 // Add to the live bytes per unevacuated from-space. Note this code is always run by the
2268 // GC-running thread (no synchronization required).
2269 DCHECK(region_space_bitmap_->Test(to_ref));
2270 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
2271 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
2272 region_space_->AddLiveBytes(to_ref, alloc_size);
2273 }
2274 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
2275 CHECK(to_ref != nullptr);
2276 space::RegionSpace* region_space = RegionSpace();
2277 CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
2278 AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
2279 AssertToSpaceInvariantFieldVisitor visitor(this);
2280 to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
2281 visitor,
2282 visitor);
2283 }
2284 }
2285
2286 class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
2287 public:
2288 explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
2289 : concurrent_copying_(concurrent_copying) {
2290 }
2291
2292 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
2293 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
2294 // to avoid a deadlock b/31500969.
2295 CHECK(concurrent_copying_->weak_ref_access_enabled_);
2296 concurrent_copying_->weak_ref_access_enabled_ = false;
2297 }
2298
2299 private:
2300 ConcurrentCopying* const concurrent_copying_;
2301 };
2302
2303 void ConcurrentCopying::SwitchToSharedMarkStackMode() {
2304 Thread* self = Thread::Current();
2305 DCHECK(thread_running_gc_ != nullptr);
2306 DCHECK(self == thread_running_gc_);
2307 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2308 MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2309 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
2310 static_cast<uint32_t>(kMarkStackModeThreadLocal));
2311 mark_stack_mode_.store(kMarkStackModeShared, std::memory_order_relaxed);
2312 DisableWeakRefAccessCallback dwrac(this);
2313 // Process the thread local mark stacks one last time after switching to the shared mark stack
2314 // mode and disable weak ref accesses.
2315 ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
2316 &dwrac,
2317 [this] (mirror::Object* ref)
2318 REQUIRES_SHARED(Locks::mutator_lock_) {
2319 ProcessMarkStackRef(ref);
2320 });
2321 if (kVerboseMode) {
2322 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
2323 }
2324 }
2325
2326 void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
2327 Thread* self = Thread::Current();
2328 DCHECK(thread_running_gc_ != nullptr);
2329 DCHECK(self == thread_running_gc_);
2330 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2331 MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2332 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
2333 static_cast<uint32_t>(kMarkStackModeShared));
2334 mark_stack_mode_.store(kMarkStackModeGcExclusive, std::memory_order_relaxed);
2335 QuasiAtomic::ThreadFenceForConstructor();
2336 if (kVerboseMode) {
2337 LOG(INFO) << "Switched to GC exclusive mark stack mode";
2338 }
2339 }
2340
2341 void ConcurrentCopying::CheckEmptyMarkStack() {
2342 Thread* self = Thread::Current();
2343 DCHECK(thread_running_gc_ != nullptr);
2344 DCHECK(self == thread_running_gc_);
2345 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2346 MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2347 if (mark_stack_mode == kMarkStackModeThreadLocal) {
2348 // Thread-local mark stack mode.
2349 RevokeThreadLocalMarkStacks(false, nullptr);
2350 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2351 if (!revoked_mark_stacks_.empty()) {
2352 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
2353 while (!mark_stack->IsEmpty()) {
2354 mirror::Object* obj = mark_stack->PopBack();
2355 if (kUseBakerReadBarrier) {
2356 uint32_t rb_state = obj->GetReadBarrierState();
2357 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
2358 << rb_state << " is_marked=" << IsMarked(obj);
2359 } else {
2360 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
2361 << " is_marked=" << IsMarked(obj);
2362 }
2363 }
2364 }
2365 LOG(FATAL) << "mark stack is not empty";
2366 }
2367 } else {
2368 // Shared, GC-exclusive, or off.
2369 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2370 CHECK(gc_mark_stack_->IsEmpty());
2371 CHECK(revoked_mark_stacks_.empty());
2372 }
2373 }
2374
2375 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
2376 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
2377 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2378 Runtime::Current()->SweepSystemWeaks(this);
2379 }
2380
2381 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
2382 if (use_generational_cc_ && young_gen_) {
2383 // Only sweep objects on the live stack.
2384 SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
2385 } else {
2386 {
2387 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
2388 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
2389 if (kEnableFromSpaceAccountingCheck) {
2390 // Ensure that nobody inserted items in the live stack after we swapped the stacks.
2391 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
2392 }
2393 heap_->MarkAllocStackAsLive(live_stack);
2394 live_stack->Reset();
2395 }
2396 CheckEmptyMarkStack();
2397 TimingLogger::ScopedTiming split("Sweep", GetTimings());
2398 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
2399 if (space->IsContinuousMemMapAllocSpace() && space != region_space_
2400 && !immune_spaces_.ContainsSpace(space)) {
2401 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2402 TimingLogger::ScopedTiming split2(
2403 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
2404 RecordFree(alloc_space->Sweep(swap_bitmaps));
2405 }
2406 }
2407 SweepLargeObjects(swap_bitmaps);
2408 }
2409 }
2410
2411 // Copied and adapted from MarkSweep::SweepArray.
2412 void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
2413 // This method is only used when Generational CC collection is enabled.
2414 DCHECK(use_generational_cc_);
2415 CheckEmptyMarkStack();
2416 TimingLogger::ScopedTiming t("SweepArray", GetTimings());
2417 Thread* self = Thread::Current();
2418 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
2419 sweep_array_free_buffer_mem_map_.BaseBegin());
2420 size_t chunk_free_pos = 0;
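// Objects to be freed are batched into chunk_free_buffer and released kSweepArrayChunkFreeSize
// at a time, so each FreeList() call amortizes its cost over many objects.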
2421 ObjectBytePair freed;
2422 ObjectBytePair freed_los;
2423 // How many objects are left in the array, modified after each space is swept.
2424 StackReference<mirror::Object>* objects = allocations->Begin();
2425 size_t count = allocations->Size();
2426 // Start by sweeping the continuous spaces.
2427 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
2428 if (!space->IsAllocSpace() ||
2429 space == region_space_ ||
2430 immune_spaces_.ContainsSpace(space) ||
2431 space->GetLiveBitmap() == nullptr) {
2432 continue;
2433 }
2434 space::AllocSpace* alloc_space = space->AsAllocSpace();
2435 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
2436 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
2437 if (swap_bitmaps) {
2438 std::swap(live_bitmap, mark_bitmap);
2439 }
2440 StackReference<mirror::Object>* out = objects;
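// Compact the snapshot in place: entries in this space are removed (and freed if unmarked),
// while entries from other spaces are copied forward through 'out' so that later spaces only
// examine the remaining entries.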
2441 for (size_t i = 0; i < count; ++i) {
2442 mirror::Object* const obj = objects[i].AsMirrorPtr();
2443 if (kUseThreadLocalAllocationStack && obj == nullptr) {
2444 continue;
2445 }
2446 if (space->HasAddress(obj)) {
2447 // This object is in the space, remove it from the array and add it to the sweep buffer
2448 // if needed.
2449 if (!mark_bitmap->Test(obj)) {
2450 if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
2451 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
2452 freed.objects += chunk_free_pos;
2453 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
2454 chunk_free_pos = 0;
2455 }
2456 chunk_free_buffer[chunk_free_pos++] = obj;
2457 }
2458 } else {
2459 (out++)->Assign(obj);
2460 }
2461 }
2462 if (chunk_free_pos > 0) {
2463 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
2464 freed.objects += chunk_free_pos;
2465 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
2466 chunk_free_pos = 0;
2467 }
2468 // All of the references which the space contained are no longer in the allocation stack;
2469 // update the count.
2470 count = out - objects;
2471 }
2472 // Handle the large object space.
2473 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
2474 if (large_object_space != nullptr) {
2475 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
2476 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
2477 if (swap_bitmaps) {
2478 std::swap(large_live_objects, large_mark_objects);
2479 }
2480 for (size_t i = 0; i < count; ++i) {
2481 mirror::Object* const obj = objects[i].AsMirrorPtr();
2482 // Handle large objects.
2483 if (kUseThreadLocalAllocationStack && obj == nullptr) {
2484 continue;
2485 }
2486 if (!large_mark_objects->Test(obj)) {
2487 ++freed_los.objects;
2488 freed_los.bytes += large_object_space->Free(self, obj);
2489 }
2490 }
2491 }
2492 {
2493 TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
2494 RecordFree(freed);
2495 RecordFreeLOS(freed_los);
2496 t2.NewTiming("ResetStack");
2497 allocations->Reset();
2498 }
2499 sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
2500 }
2501
2502 void ConcurrentCopying::MarkZygoteLargeObjects() {
2503 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
2504 Thread* const self = Thread::Current();
2505 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
2506 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
2507 if (los != nullptr) {
2508 // Pick the current live bitmap (mark bitmap if swapped).
2509 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
2510 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
2511 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
2512 std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
2513 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
2514 reinterpret_cast<uintptr_t>(range.second),
2515 [mark_bitmap, los, self](mirror::Object* obj)
2516 REQUIRES(Locks::heap_bitmap_lock_)
2517 REQUIRES_SHARED(Locks::mutator_lock_) {
2518 if (los->IsZygoteLargeObject(self, obj)) {
2519 mark_bitmap->Set(obj);
2520 }
2521 });
2522 }
2523 }
2524
2525 void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
2526 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
2527 if (heap_->GetLargeObjectsSpace() != nullptr) {
2528 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
2529 }
2530 }
2531
2532 void ConcurrentCopying::CaptureRssAtPeak() {
2533 using range_t = std::pair<void*, void*>;
2534 // This operation is expensive as several calls to mincore() are performed.
2535 // Also, this must be called before clearing regions in ReclaimPhase().
2536 // Therefore, we make it conditional on the flag that enables dumping GC
2537 // performance info on shutdown.
2538 if (Runtime::Current()->GetDumpGCPerformanceOnShutdown()) {
2539 std::list<range_t> gc_ranges;
2540 auto add_gc_range = [&gc_ranges](void* start, size_t size) {
2541 void* end = static_cast<char*>(start) + RoundUp(size, kPageSize);
2542 gc_ranges.emplace_back(range_t(start, end));
2543 };
2544
2545 // region space
2546 DCHECK(IsAligned<kPageSize>(region_space_->Limit()));
2547 gc_ranges.emplace_back(range_t(region_space_->Begin(), region_space_->Limit()));
2548 // mark bitmap
2549 add_gc_range(region_space_bitmap_->Begin(), region_space_bitmap_->Size());
2550
2551 // non-moving space
2552 {
2553 DCHECK(IsAligned<kPageSize>(heap_->non_moving_space_->Limit()));
2554 gc_ranges.emplace_back(range_t(heap_->non_moving_space_->Begin(),
2555 heap_->non_moving_space_->Limit()));
2556 // mark bitmap
2557 accounting::ContinuousSpaceBitmap *bitmap = heap_->non_moving_space_->GetMarkBitmap();
2558 add_gc_range(bitmap->Begin(), bitmap->Size());
2559 // live bitmap. Deal with bound bitmaps.
2560 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2561 if (heap_->non_moving_space_->HasBoundBitmaps()) {
2562 DCHECK_EQ(bitmap, heap_->non_moving_space_->GetLiveBitmap());
2563 bitmap = heap_->non_moving_space_->GetTempBitmap();
2564 } else {
2565 bitmap = heap_->non_moving_space_->GetLiveBitmap();
2566 }
2567 add_gc_range(bitmap->Begin(), bitmap->Size());
2568 }
2569 // large-object space
2570 if (heap_->GetLargeObjectsSpace()) {
2571 heap_->GetLargeObjectsSpace()->ForEachMemMap([&add_gc_range](const MemMap& map) {
2572 DCHECK(IsAligned<kPageSize>(map.BaseSize()));
2573 add_gc_range(map.BaseBegin(), map.BaseSize());
2574 });
2575 // mark bitmap
2576 accounting::LargeObjectBitmap* bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2577 add_gc_range(bitmap->Begin(), bitmap->Size());
2578 // live bitmap
2579 bitmap = heap_->GetLargeObjectsSpace()->GetLiveBitmap();
2580 add_gc_range(bitmap->Begin(), bitmap->Size());
2581 }
2582 // card table
2583 add_gc_range(heap_->GetCardTable()->MemMapBegin(), heap_->GetCardTable()->MemMapSize());
2584 // inter-region refs
2585 if (use_generational_cc_ && !young_gen_) {
2586 // region space
2587 add_gc_range(region_space_inter_region_bitmap_->Begin(),
2588 region_space_inter_region_bitmap_->Size());
2589 // non-moving space
2590 add_gc_range(non_moving_space_inter_region_bitmap_->Begin(),
2591 non_moving_space_inter_region_bitmap_->Size());
2592 }
2593 // Extract RSS using mincore(). Updates the cumulative RSS counter.
2594 ExtractRssFromMincore(&gc_ranges);
2595 }
2596 }
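// Illustrative sketch (not part of ART) of what counting resident pages for one of the ranges
// collected above involves; ExtractRssFromMincore() is assumed to do the per-range equivalent.
// The helper name and error handling here are hypothetical.
//
//   size_t CountResidentBytes(void* begin, size_t length) {
//     size_t num_pages = RoundUp(length, kPageSize) / kPageSize;
//     std::vector<unsigned char> vec(num_pages);
//     if (mincore(begin, num_pages * kPageSize, vec.data()) != 0) {
//       return 0;  // Unmapped range or error; treat as not resident.
//     }
//     size_t resident_bytes = 0;
//     for (unsigned char v : vec) {
//       resident_bytes += (v & 1) ? kPageSize : 0;  // Low bit set => page is resident.
//     }
//     return resident_bytes;
//   }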
2597
2598 void ConcurrentCopying::ReclaimPhase() {
2599 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
2600 if (kVerboseMode) {
2601 LOG(INFO) << "GC ReclaimPhase";
2602 }
2603 Thread* self = Thread::Current();
2604
2605 {
2606 // Double-check that the mark stack is empty.
2607 // Note: need to set this after VerifyNoFromSpaceRef().
2608 is_asserting_to_space_invariant_ = false;
2609 QuasiAtomic::ThreadFenceForConstructor();
2610 if (kVerboseMode) {
2611 LOG(INFO) << "Issue an empty check point. ";
2612 }
2613 IssueEmptyCheckpoint();
2614 // Disable the check.
2615 is_mark_stack_push_disallowed_.store(0, std::memory_order_seq_cst);
2616 if (kUseBakerReadBarrier) {
2617 updated_all_immune_objects_.store(false, std::memory_order_seq_cst);
2618 }
2619 CheckEmptyMarkStack();
2620 }
2621
2622 // Capture RSS at the time when memory usage is at its peak. All GC related
2623 // memory ranges like java heap, card table, bitmap etc. are taken into
2624 // account.
2625 // TODO: We can fetch resident memory for region space directly by going
2626 // through list of allocated regions. This way we can avoid calling mincore on
2627 // the biggest memory range, thereby reducing the cost of this function.
2628 CaptureRssAtPeak();
2629
2630 {
2631 // Record freed objects.
2632 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
2633 // Don't include thread-locals that are in the to-space.
2634 const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
2635 const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
2636 const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
2637 const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
2638 uint64_t to_bytes = bytes_moved_.load(std::memory_order_relaxed) + bytes_moved_gc_thread_;
2639 cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
2640 uint64_t to_objects = objects_moved_.load(std::memory_order_relaxed) + objects_moved_gc_thread_;
2641 cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
2642 if (kEnableFromSpaceAccountingCheck) {
2643 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
2644 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
2645 }
2646 CHECK_LE(to_objects, from_objects);
2647 // to_bytes <= from_bytes is only approximately true, because objects expand a little when
2648 // copying to non-moving space in near-OOM situations.
2649 if (from_bytes > 0) {
2650 copied_live_bytes_ratio_sum_ += static_cast<float>(to_bytes) / from_bytes;
2651 gc_count_++;
2652 }
2653
2654 // Cleared bytes and objects, populated by the call to RegionSpace::ClearFromSpace below.
2655 uint64_t cleared_bytes;
2656 uint64_t cleared_objects;
2657 {
2658 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
2659 region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
2660 // `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
2661 // RegionSpace::ClearFromSpace may clear empty unevac regions.
2662 CHECK_GE(cleared_bytes, from_bytes);
2663 CHECK_GE(cleared_objects, from_objects);
2664 }
2665 // freed_bytes could conceivably be negative if we fall back to the non-moving space and have to
2666 // pad to a larger size.
2667 int64_t freed_bytes = (int64_t)cleared_bytes - (int64_t)to_bytes;
2668 uint64_t freed_objects = cleared_objects - to_objects;
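// Worked example with illustrative numbers only: if ClearFromSpace() reports
// cleared_bytes = 96 MB while to_bytes = 30 MB of live data were copied, then
// freed_bytes = 66 MB and freed_objects = cleared_objects - to_objects. A negative
// freed_bytes would mean the copies (plus any padding from the non-moving-space
// fallback) outgrew what was cleared, as noted above.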
2669 if (kVerboseMode) {
2670 LOG(INFO) << "RecordFree:"
2671 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
2672 << " unevac_from_bytes=" << unevac_from_bytes
2673 << " unevac_from_objects=" << unevac_from_objects
2674 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
2675 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
2676 << " from_space size=" << region_space_->FromSpaceSize()
2677 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
2678 << " to_space size=" << region_space_->ToSpaceSize();
2679 LOG(INFO) << "(before) num_bytes_allocated="
2680 << heap_->num_bytes_allocated_.load();
2681 }
2682 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
2683 if (kVerboseMode) {
2684 LOG(INFO) << "(after) num_bytes_allocated="
2685 << heap_->num_bytes_allocated_.load();
2686 }
2687
2688 float reclaimed_bytes_ratio = static_cast<float>(freed_bytes) / num_bytes_allocated_before_gc_;
2689 reclaimed_bytes_ratio_sum_ += reclaimed_bytes_ratio;
2690 }
2691
2692 {
2693 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2694 Sweep(/* swap_bitmaps= */ false);
2695 SwapBitmaps();
2696 heap_->UnBindBitmaps();
2697
2698 // The bitmap was cleared at the start of the GC, there is nothing we need to do here.
2699 DCHECK(region_space_bitmap_ != nullptr);
2700 region_space_bitmap_ = nullptr;
2701 }
2702
2703 CheckEmptyMarkStack();
2704
2705 if (heap_->dump_region_info_after_gc_) {
2706 LOG(INFO) << "time=" << region_space_->Time();
2707 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
2708 }
2709
2710 if (kVerboseMode) {
2711 LOG(INFO) << "GC end of ReclaimPhase";
2712 }
2713 }
2714
2715 std::string ConcurrentCopying::DumpReferenceInfo(mirror::Object* ref,
2716 const char* ref_name,
2717 const char* indent) {
2718 std::ostringstream oss;
2719 oss << indent << heap_->GetVerification()->DumpObjectInfo(ref, ref_name) << '\n';
2720 if (ref != nullptr) {
2721 if (kUseBakerReadBarrier) {
2722 oss << indent << ref_name << "->GetMarkBit()=" << ref->GetMarkBit() << '\n';
2723 oss << indent << ref_name << "->GetReadBarrierState()=" << ref->GetReadBarrierState() << '\n';
2724 }
2725 }
2726 if (region_space_->HasAddress(ref)) {
2727 oss << indent << "Region containing " << ref_name << ":" << '\n';
2728 region_space_->DumpRegionForObject(oss, ref);
2729 if (region_space_bitmap_ != nullptr) {
2730 oss << indent << "region_space_bitmap_->Test(" << ref_name << ")="
2731 << std::boolalpha << region_space_bitmap_->Test(ref) << std::noboolalpha;
2732 }
2733 }
2734 return oss.str();
2735 }
2736
2737 std::string ConcurrentCopying::DumpHeapReference(mirror::Object* obj,
2738 MemberOffset offset,
2739 mirror::Object* ref) {
2740 std::ostringstream oss;
2741 constexpr const char* kIndent = " ";
2742 oss << kIndent << "Invalid reference: ref=" << ref
2743 << " referenced from: object=" << obj << " offset= " << offset << '\n';
2744 // Information about `obj`.
2745 oss << DumpReferenceInfo(obj, "obj", kIndent) << '\n';
2746 // Information about `ref`.
2747 oss << DumpReferenceInfo(ref, "ref", kIndent);
2748 return oss.str();
2749 }
2750
2751 void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
2752 MemberOffset offset,
2753 mirror::Object* ref) {
2754 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
2755 if (is_asserting_to_space_invariant_) {
2756 if (ref == nullptr) {
2757 // OK.
2758 return;
2759 } else if (region_space_->HasAddress(ref)) {
2760 // Check to-space invariant in region space (moving space).
2761 using RegionType = space::RegionSpace::RegionType;
2762 space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
2763 if (type == RegionType::kRegionTypeToSpace) {
2764 // OK.
2765 return;
2766 } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
2767 if (!IsMarkedInUnevacFromSpace(ref)) {
2768 LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
2769 // Remove memory protection from the region space and log debugging information.
2770 region_space_->Unprotect();
2771 LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
2772 Thread::Current()->DumpJavaStack(LOG_STREAM(FATAL_WITHOUT_ABORT));
2773 }
2774 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
2775 } else {
2776 // Not OK: either a from-space ref or a reference in an unused region.
2777 if (type == RegionType::kRegionTypeFromSpace) {
2778 LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
2779 } else {
2780 LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
2781 }
2782 // Remove memory protection from the region space and log debugging information.
2783 region_space_->Unprotect();
2784 LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
2785 if (obj != nullptr) {
2786 LogFromSpaceRefHolder(obj, offset);
2787 LOG(FATAL_WITHOUT_ABORT) << "UNEVAC " << region_space_->IsInUnevacFromSpace(obj) << " "
2788 << obj << " " << obj->GetMarkBit();
2789 if (region_space_->HasAddress(obj)) {
2790 region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
2791 }
2792 LOG(FATAL_WITHOUT_ABORT) << "CARD " << static_cast<size_t>(
2793 *Runtime::Current()->GetHeap()->GetCardTable()->CardFromAddr(
2794 reinterpret_cast<uint8_t*>(obj)));
2795 if (region_space_->HasAddress(obj)) {
2796 LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << region_space_bitmap_->Test(obj);
2797 } else {
2798 accounting::ContinuousSpaceBitmap* mark_bitmap =
2799 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
2800 if (mark_bitmap != nullptr) {
2801 LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << mark_bitmap->Test(obj);
2802 } else {
2803 accounting::LargeObjectBitmap* los_bitmap =
2804 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
2805 LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << los_bitmap->Test(obj);
2806 }
2807 }
2808 }
2809 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
2810 LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
2811 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
2812 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
2813 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
2814 LOG(FATAL) << "Invalid reference " << ref
2815 << " referenced from object " << obj << " at offset " << offset;
2816 }
2817 } else {
2818 // Check to-space invariant in non-moving space.
2819 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
2820 }
2821 }
2822 }
2823
2824 class RootPrinter {
2825 public:
2826 RootPrinter() { }
2827
2828 template <class MirrorType>
2829 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
2830 REQUIRES_SHARED(Locks::mutator_lock_) {
2831 if (!root->IsNull()) {
2832 VisitRoot(root);
2833 }
2834 }
2835
2836 template <class MirrorType>
2837 void VisitRoot(mirror::Object** root)
2838 REQUIRES_SHARED(Locks::mutator_lock_) {
2839 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
2840 }
2841
2842 template <class MirrorType>
2843 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
2844 REQUIRES_SHARED(Locks::mutator_lock_) {
2845 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
2846 }
2847 };
2848
2849 std::string ConcurrentCopying::DumpGcRoot(mirror::Object* ref) {
2850 std::ostringstream oss;
2851 constexpr const char* kIndent = " ";
2852 oss << kIndent << "Invalid GC root: ref=" << ref << '\n';
2853 // Information about `ref`.
2854 oss << DumpReferenceInfo(ref, "ref", kIndent);
2855 return oss.str();
2856 }
2857
2858 void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
2859 mirror::Object* ref) {
2860 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
2861 if (is_asserting_to_space_invariant_) {
2862 if (ref == nullptr) {
2863 // OK.
2864 return;
2865 } else if (region_space_->HasAddress(ref)) {
2866 // Check to-space invariant in region space (moving space).
2867 using RegionType = space::RegionSpace::RegionType;
2868 space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
2869 if (type == RegionType::kRegionTypeToSpace) {
2870 // OK.
2871 return;
2872 } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
2873 if (!IsMarkedInUnevacFromSpace(ref)) {
2874 LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
2875 // Remove memory protection from the region space and log debugging information.
2876 region_space_->Unprotect();
2877 LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
2878 }
2879 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
2880 } else {
2881 // Not OK: either a from-space ref or a reference in an unused region.
2882 if (type == RegionType::kRegionTypeFromSpace) {
2883 LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
2884 } else {
2885 LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
2886 }
2887 // Remove memory protection from the region space and log debugging information.
2888 region_space_->Unprotect();
2889 LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
2890 if (gc_root_source == nullptr) {
2891 // No info.
2892 } else if (gc_root_source->HasArtField()) {
2893 ArtField* field = gc_root_source->GetArtField();
2894 LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " "
2895 << ArtField::PrettyField(field);
2896 RootPrinter root_printer;
2897 field->VisitRoots(root_printer);
2898 } else if (gc_root_source->HasArtMethod()) {
2899 ArtMethod* method = gc_root_source->GetArtMethod();
2900 LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " "
2901 << ArtMethod::PrettyMethod(method);
2902 RootPrinter root_printer;
2903 method->VisitRoots(root_printer, kRuntimePointerSize);
2904 }
2905 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
2906 LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
2907 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
2908 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
2909 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
2910 LOG(FATAL) << "Invalid reference " << ref;
2911 }
2912 } else {
2913 // Check to-space invariant in non-moving space.
2914 AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
2915 }
2916 }
2917 }
2918
2919 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
2920 if (kUseBakerReadBarrier) {
2921 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
2922 << " holder rb_state=" << obj->GetReadBarrierState();
2923 } else {
2924 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
2925 }
2926 if (region_space_->IsInFromSpace(obj)) {
2927 LOG(INFO) << "holder is in the from-space.";
2928 } else if (region_space_->IsInToSpace(obj)) {
2929 LOG(INFO) << "holder is in the to-space.";
2930 } else if (region_space_->IsInUnevacFromSpace(obj)) {
2931 LOG(INFO) << "holder is in the unevac from-space.";
2932 if (IsMarkedInUnevacFromSpace(obj)) {
2933 LOG(INFO) << "holder is marked in the region space bitmap.";
2934 } else {
2935 LOG(INFO) << "holder is not marked in the region space bitmap.";
2936 }
2937 } else {
2938 // In a non-moving space.
2939 if (immune_spaces_.ContainsObject(obj)) {
2940 LOG(INFO) << "holder is in an immune image or the zygote space.";
2941 } else {
2942 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
2943 accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
2944 accounting::LargeObjectBitmap* los_bitmap = nullptr;
2945 const bool is_los = !mark_bitmap->HasAddress(obj);
2946 if (is_los) {
2947 DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(obj))
2948 << "obj=" << obj
2949 << " LOS bit map covers the entire lower 4GB address range";
2950 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2951 }
2952 if (!is_los && mark_bitmap->Test(obj)) {
2953 LOG(INFO) << "holder is marked in the non-moving space mark bit map.";
2954 } else if (is_los && los_bitmap->Test(obj)) {
2955 LOG(INFO) << "holder is marked in the los bit map.";
2956 } else {
2957 // If ref is on the allocation stack, then it is considered
2958 // marked/alive (but not necessarily on the live stack).
2959 if (IsOnAllocStack(obj)) {
2960 LOG(INFO) << "holder is on the alloc stack.";
2961 } else {
2962 LOG(INFO) << "holder is not marked or on the alloc stack.";
2963 }
2964 }
2965 }
2966 }
2967 LOG(INFO) << "offset=" << offset.SizeValue();
2968 }
2969
2970 bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) {
2971 DCHECK(!region_space_->HasAddress(from_ref)) << "ref=" << from_ref;
2972 DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
2973 if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
2974 return true;
2975 } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
2976 // Read the comment in IsMarkedInUnevacFromSpace()
2977 accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
2978 accounting::LargeObjectBitmap* los_bitmap = nullptr;
2979 const bool is_los = !mark_bitmap->HasAddress(from_ref);
2980 if (is_los) {
2981 DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(from_ref))
2982 << "ref=" << from_ref
2983 << " doesn't belong to non-moving space and large object space doesn't exist";
2984 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2985 }
2986 if (is_los ? los_bitmap->Test(from_ref) : mark_bitmap->Test(from_ref)) {
2987 return true;
2988 }
2989 }
2990 return IsOnAllocStack(from_ref);
2991 }
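// In compact form, the predicate above is roughly (informal summary; during a generational
// cycle that is still scanning, the bitmap term is skipped entirely):
//   marked(ref) := (Baker && rb_state(ref) == gray)
//                  || (is_los ? los_mark_bitmap : non_moving_mark_bitmap).Test(ref)
//                  || IsOnAllocStack(ref)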
2992
2993 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
2994 mirror::Object* ref) {
2995 CHECK(ref != nullptr);
2996 CHECK(!region_space_->HasAddress(ref)) << "obj=" << obj << " ref=" << ref;
2997 // In a non-moving space. Check that the ref is marked.
2998 if (immune_spaces_.ContainsObject(ref)) {
2999 // Immune space case.
3000 if (kUseBakerReadBarrier) {
3001 // Immune object may not be gray if called from the GC.
3002 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
3003 return;
3004 }
3005 bool updated_all_immune_objects = updated_all_immune_objects_.load(std::memory_order_seq_cst);
3006 CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
3007 << "Unmarked immune space ref. obj=" << obj << " rb_state="
3008 << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
3009 << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
3010 << " updated_all_immune_objects=" << updated_all_immune_objects;
3011 }
3012 } else {
3013 // Non-moving space and large-object space (LOS) cases.
3014 // If `ref` is on the allocation stack, then it may not be
3015 // marked live, but considered marked/alive (but not
3016 // necessarily on the live stack).
3017 CHECK(IsMarkedInNonMovingSpace(ref))
3018 << "Unmarked ref that's not on the allocation stack."
3019 << " obj=" << obj
3020 << " ref=" << ref
3021 << " rb_state=" << ref->GetReadBarrierState()
3022 << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
3023 << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
3024 << " done_scanning="
3025 << std::boolalpha << done_scanning_.load(std::memory_order_acquire) << std::noboolalpha
3026 << " self=" << Thread::Current();
3027 }
3028 }
3029
3030 // Used to scan ref fields of an object.
3031 template <bool kNoUnEvac>
3032 class ConcurrentCopying::RefFieldsVisitor {
3033 public:
3034 explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
3035 : collector_(collector), thread_(thread) {
3036 // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
3037 DCHECK(!kNoUnEvac || collector_->use_generational_cc_);
3038 }
3039
3040 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
3041 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
3042 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
3043 collector_->Process<kNoUnEvac>(obj, offset);
3044 }
3045
3046 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
3047 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
3048 CHECK(klass->IsTypeOfReferenceClass());
3049 collector_->DelayReferenceReferent(klass, ref);
3050 }
3051
3052 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
3053 ALWAYS_INLINE
3054 REQUIRES_SHARED(Locks::mutator_lock_) {
3055 if (!root->IsNull()) {
3056 VisitRoot(root);
3057 }
3058 }
3059
3060 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
3061 ALWAYS_INLINE
3062 REQUIRES_SHARED(Locks::mutator_lock_) {
3063 collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
3064 }
3065
3066 private:
3067 ConcurrentCopying* const collector_;
3068 Thread* const thread_;
3069 };
3070
3071 template <bool kNoUnEvac>
3072 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
3073 // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
3074 DCHECK(!kNoUnEvac || use_generational_cc_);
3075 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
3076 // Avoid all read barriers while visiting references to help performance.
3077 // Don't do this in transaction mode because we may read the old value of a field, which may
3078 // trigger read barriers.
3079 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
3080 }
3081 DCHECK(!region_space_->IsInFromSpace(to_ref));
3082 DCHECK_EQ(Thread::Current(), thread_running_gc_);
3083 RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
3084 // Disable the read barrier for a performance reason.
3085 to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
3086 visitor, visitor);
3087 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
3088 thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
3089 }
3090 }
3091
3092 template <bool kNoUnEvac>
3093 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
3094 // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
3095 DCHECK(!kNoUnEvac || use_generational_cc_);
3096 DCHECK_EQ(Thread::Current(), thread_running_gc_);
3097 mirror::Object* ref = obj->GetFieldObject<
3098 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
3099 mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
3100 thread_running_gc_,
3101 ref,
3102 /*holder=*/ obj,
3103 offset);
3104 if (to_ref == ref) {
3105 return;
3106 }
3107 // This may fail if the mutator writes to the field at the same time. But it's ok.
3108 mirror::Object* expected_ref = ref;
3109 mirror::Object* new_ref = to_ref;
3110 do {
3111 if (expected_ref !=
3112 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
3113 // It was updated by the mutator.
3114 break;
3115 }
3116 // Use release CAS to make sure threads reading the reference see contents of copied objects.
3117 } while (!obj->CasFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
3118 offset,
3119 expected_ref,
3120 new_ref,
3121 CASMode::kWeak,
3122 std::memory_order_release));
3123 }
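// Process() above and VisitRoots()/MarkRoot() below all follow the same update pattern: read
// the old slot value, Mark() it, and retry a weak CAS only while the slot still holds the old
// value, so a concurrent mutator write always wins. Informal shape of the pattern:
//
//   old = slot.load();
//   forwarded = Mark(old);
//   while (slot.load() == old && !slot.weak_cas(old, forwarded)) { /* retry */ }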
3124
3125 // Process some roots.
3126 inline void ConcurrentCopying::VisitRoots(
3127 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
3128 Thread* const self = Thread::Current();
3129 for (size_t i = 0; i < count; ++i) {
3130 mirror::Object** root = roots[i];
3131 mirror::Object* ref = *root;
3132 mirror::Object* to_ref = Mark(self, ref);
3133 if (to_ref == ref) {
3134 continue;
3135 }
3136 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
3137 mirror::Object* expected_ref = ref;
3138 mirror::Object* new_ref = to_ref;
3139 do {
3140 if (expected_ref != addr->load(std::memory_order_relaxed)) {
3141 // It was updated by the mutator.
3142 break;
3143 }
3144 } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
3145 }
3146 }
3147
3148 template<bool kGrayImmuneObject>
3149 inline void ConcurrentCopying::MarkRoot(Thread* const self,
3150 mirror::CompressedReference<mirror::Object>* root) {
3151 DCHECK(!root->IsNull());
3152 mirror::Object* const ref = root->AsMirrorPtr();
3153 mirror::Object* to_ref = Mark<kGrayImmuneObject>(self, ref);
3154 if (to_ref != ref) {
3155 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
3156 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
3157 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
3158 // If the CAS fails, then it was updated by the mutator.
3159 do {
3160 if (ref != addr->load(std::memory_order_relaxed).AsMirrorPtr()) {
3161 // It was updated by the mutator.
3162 break;
3163 }
3164 } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
3165 }
3166 }
3167
3168 inline void ConcurrentCopying::VisitRoots(
3169 mirror::CompressedReference<mirror::Object>** roots, size_t count,
3170 const RootInfo& info ATTRIBUTE_UNUSED) {
3171 Thread* const self = Thread::Current();
3172 for (size_t i = 0; i < count; ++i) {
3173 mirror::CompressedReference<mirror::Object>* const root = roots[i];
3174 if (!root->IsNull()) {
3175 // kGrayImmuneObject is true because this is used for the thread flip.
3176 MarkRoot</*kGrayImmuneObject=*/true>(self, root);
3177 }
3178 }
3179 }
3180
3181 // Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC thread.
3182 class ConcurrentCopying::ScopedGcGraysImmuneObjects {
3183 public:
3184 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
3185 : collector_(collector), enabled_(false) {
3186 if (kUseBakerReadBarrier &&
3187 collector_->thread_running_gc_ == Thread::Current() &&
3188 !collector_->gc_grays_immune_objects_) {
3189 collector_->gc_grays_immune_objects_ = true;
3190 enabled_ = true;
3191 }
3192 }
3193
3194 ~ScopedGcGraysImmuneObjects() {
3195 if (kUseBakerReadBarrier &&
3196 collector_->thread_running_gc_ == Thread::Current() &&
3197 enabled_) {
3198 DCHECK(collector_->gc_grays_immune_objects_);
3199 collector_->gc_grays_immune_objects_ = false;
3200 }
3201 }
3202
3203 private:
3204 ConcurrentCopying* const collector_;
3205 bool enabled_;
3206 };
3207
3208 // Fill the given memory block with a dummy object. Used to fill in copies
3209 // of objects that were lost in the race.
3210 void ConcurrentCopying::FillWithDummyObject(Thread* const self,
3211 mirror::Object* dummy_obj,
3212 size_t byte_size) {
3213 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
3214 // barriers here because we need the updated reference to the int array class, etc. Temporarily set
3215 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
3216 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
3217 CHECK_ALIGNED(byte_size, kObjectAlignment);
3218 memset(dummy_obj, 0, byte_size);
3219 // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
3220 // Explicitly mark to make sure to get an object in the to-space.
3221 mirror::Class* int_array_class = down_cast<mirror::Class*>(
3222 Mark(self, GetClassRoot<mirror::IntArray, kWithoutReadBarrier>().Ptr()));
3223 CHECK(int_array_class != nullptr);
3224 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
3225 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
3226 }
3227 size_t component_size = int_array_class->GetComponentSize();
3228 CHECK_EQ(component_size, sizeof(int32_t));
3229 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
3230 if (data_offset > byte_size) {
3231 // An int array is too big. Use java.lang.Object.
3232 CHECK(java_lang_Object_ != nullptr);
3233 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
3234 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
3235 }
3236 CHECK_EQ(byte_size, java_lang_Object_->GetObjectSize<kVerifyNone>());
3237 dummy_obj->SetClass(java_lang_Object_);
3238 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
3239 } else {
3240 // Use an int array.
3241 dummy_obj->SetClass(int_array_class);
3242 CHECK(dummy_obj->IsArrayInstance<kVerifyNone>());
3243 int32_t length = (byte_size - data_offset) / component_size;
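// Worked example (illustrative numbers, assuming, say, a 12-byte int[] data offset): filling
// a 64-byte hole gives length = (64 - 12) / 4 = 13, so the dummy parses as int[13] and
// SizeOf() returns exactly byte_size, keeping the heap parsable.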
3244 ObjPtr<mirror::Array> dummy_arr = dummy_obj->AsArray<kVerifyNone>();
3245 dummy_arr->SetLength(length);
3246 CHECK_EQ(dummy_arr->GetLength(), length)
3247 << "byte_size=" << byte_size << " length=" << length
3248 << " component_size=" << component_size << " data_offset=" << data_offset;
3249 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()))
3250 << "byte_size=" << byte_size << " length=" << length
3251 << " component_size=" << component_size << " data_offset=" << data_offset;
3252 }
3253 }
3254
3255 // Reuse the memory blocks that held copies of objects lost in the race.
3256 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(Thread* const self, size_t alloc_size) {
3257 // Try to reuse the blocks that were unused due to CAS failures.
3258 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
3259 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
3260 size_t byte_size;
3261 uint8_t* addr;
3262 {
3263 MutexLock mu(self, skipped_blocks_lock_);
3264 auto it = skipped_blocks_map_.lower_bound(alloc_size);
3265 if (it == skipped_blocks_map_.end()) {
3266 // Not found.
3267 return nullptr;
3268 }
3269 byte_size = it->first;
3270 CHECK_GE(byte_size, alloc_size);
3271 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
3272 // If remainder would be too small for a dummy object, retry with a larger request size.
3273 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
3274 if (it == skipped_blocks_map_.end()) {
3275 // Not found.
3276 return nullptr;
3277 }
3278 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
3279 CHECK_GE(it->first - alloc_size, min_object_size)
3280 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
3281 }
3282 // Found a block.
3283 CHECK(it != skipped_blocks_map_.end());
3284 byte_size = it->first;
3285 addr = it->second;
3286 CHECK_GE(byte_size, alloc_size);
3287 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
3288 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
3289 if (kVerboseMode) {
3290 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
3291 }
3292 skipped_blocks_map_.erase(it);
3293 }
3294 memset(addr, 0, byte_size);
3295 if (byte_size > alloc_size) {
3296 // Return the remainder to the map.
3297 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
3298 CHECK_GE(byte_size - alloc_size, min_object_size);
3299 // FillWithDummyObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
3300 // violation and possible deadlock. The deadlock case is a recursive case:
3301 // FillWithDummyObject -> Mark(IntArray.class) -> Copy -> AllocateInSkippedBlock.
3302 FillWithDummyObject(self,
3303 reinterpret_cast<mirror::Object*>(addr + alloc_size),
3304 byte_size - alloc_size);
3305 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
3306 {
3307 MutexLock mu(self, skipped_blocks_lock_);
3308 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
3309 }
3310 }
3311 return reinterpret_cast<mirror::Object*>(addr);
3312 }
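// Illustrative example of the reuse path above (sizes made up): with
// skipped_blocks_map_ = {64: p, 256: q} and alloc_size = 48, lower_bound(48) selects the
// 64-byte block at p; since the 16-byte tail is at least min_object_size, it is filled with a
// dummy object and re-inserted keyed by 16, and p is returned for the new copy.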
3313
3314 mirror::Object* ConcurrentCopying::Copy(Thread* const self,
3315 mirror::Object* from_ref,
3316 mirror::Object* holder,
3317 MemberOffset offset) {
3318 DCHECK(region_space_->IsInFromSpace(from_ref));
3319 // If the class pointer is null, the object is invalid. This could occur for a dangling pointer
3320 // from a previous GC that is either inside or outside the allocated region.
3321 mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
3322 if (UNLIKELY(klass == nullptr)) {
3323 // Remove memory protection from the region space and log debugging information.
3324 region_space_->Unprotect();
3325 heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
3326 }
3327 // There must not be a read barrier here, to avoid a nested read barrier that might violate the
3328 // to-space invariant. Note that from_ref is a from-space ref, so the SizeOf() call will access
3329 // from-space meta objects, which is ok and necessary.
3330 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
3331 size_t region_space_alloc_size = (obj_size <= space::RegionSpace::kRegionSize)
3332 ? RoundUp(obj_size, space::RegionSpace::kAlignment)
3333 : RoundUp(obj_size, space::RegionSpace::kRegionSize);
3334 size_t region_space_bytes_allocated = 0U;
3335 size_t non_moving_space_bytes_allocated = 0U;
3336 size_t bytes_allocated = 0U;
3337 size_t dummy;
3338 bool fall_back_to_non_moving = false;
3339 mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
3340 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
3341 bytes_allocated = region_space_bytes_allocated;
3342 if (LIKELY(to_ref != nullptr)) {
3343 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
3344 } else {
3345 // Failed to allocate in the region space. Try the skipped blocks.
3346 to_ref = AllocateInSkippedBlock(self, region_space_alloc_size);
3347 if (to_ref != nullptr) {
3348 // Succeeded to allocate in a skipped block.
3349 if (heap_->use_tlab_) {
3350 // This is necessary for the tlab case as it's not accounted in the space.
3351 region_space_->RecordAlloc(to_ref);
3352 }
3353 bytes_allocated = region_space_alloc_size;
3354 heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
3355 to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
3356 to_space_objects_skipped_.fetch_sub(1, std::memory_order_relaxed);
3357 } else {
3358 // Fall back to the non-moving space.
3359 fall_back_to_non_moving = true;
3360 if (kVerboseMode) {
3361 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
3362 << to_space_bytes_skipped_.load(std::memory_order_relaxed)
3363 << " skipped_objects="
3364 << to_space_objects_skipped_.load(std::memory_order_relaxed);
3365 }
3366 to_ref = heap_->non_moving_space_->Alloc(self, obj_size,
3367 &non_moving_space_bytes_allocated, nullptr, &dummy);
3368 if (UNLIKELY(to_ref == nullptr)) {
3369 LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
3370 << obj_size << " byte object in region type "
3371 << region_space_->GetRegionType(from_ref);
3372 LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
3373 }
3374 bytes_allocated = non_moving_space_bytes_allocated;
3375 }
3376 }
3377 DCHECK(to_ref != nullptr);
3378
3379 // Copy the object excluding the lock word since that is handled in the loop.
3380 to_ref->SetClass(klass);
3381 const size_t kObjectHeaderSize = sizeof(mirror::Object);
3382 DCHECK_GE(obj_size, kObjectHeaderSize);
3383 static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
3384 sizeof(LockWord),
3385 "Object header size does not match");
3386 // Memcpy can tear words since it may copy byte by byte. This is only safe because the
3387 // object in the from-space is immutable other than the lock word. b/31423258
3388 memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
3389 reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
3390 obj_size - kObjectHeaderSize);
3391
3392 // Attempt to install the forward pointer. This is in a loop as the
3393 // lock word atomic write can fail.
3394 while (true) {
3395 LockWord old_lock_word = from_ref->GetLockWord(false);
3396
3397 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
3398 // Lost the race. Another thread (either GC or mutator) stored
3399 // the forwarding pointer first. Make the lost copy (to_ref)
3400 // look like a valid but dead (dummy) object and keep it for
3401 // future reuse.
3402 FillWithDummyObject(self, to_ref, bytes_allocated);
3403 if (!fall_back_to_non_moving) {
3404 DCHECK(region_space_->IsInToSpace(to_ref));
3405 if (bytes_allocated > space::RegionSpace::kRegionSize) {
3406 // Free the large alloc.
3407 region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
3408 } else {
3409 // Record the lost copy for later reuse.
3410 heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
3411 to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_relaxed);
3412 to_space_objects_skipped_.fetch_add(1, std::memory_order_relaxed);
3413 MutexLock mu(self, skipped_blocks_lock_);
3414 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
3415 reinterpret_cast<uint8_t*>(to_ref)));
3416 }
3417 } else {
3418 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
3419 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
3420 // Free the non-moving-space chunk.
3421 heap_->non_moving_space_->Free(self, to_ref);
3422 }
3423
3424 // Get the winner's forward ptr.
3425 mirror::Object* lost_fwd_ptr = to_ref;
3426 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
3427 CHECK(to_ref != nullptr);
3428 CHECK_NE(to_ref, lost_fwd_ptr);
3429 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
3430 << "to_ref=" << to_ref << " " << heap_->DumpSpaces();
3431 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
3432 return to_ref;
3433 }
3434
3435 // Copy the old lock word over since we did not copy it yet.
3436 to_ref->SetLockWord(old_lock_word, false);
3437 // Set the gray ptr.
3438 if (kUseBakerReadBarrier) {
3439 to_ref->SetReadBarrierState(ReadBarrier::GrayState());
3440 }
3441
3442 // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
3443 // before the object copy.
3444 std::atomic_thread_fence(std::memory_order_release);
3445
3446 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
3447
3448 // Try to atomically write the fwd ptr.
3449 bool success = from_ref->CasLockWord(old_lock_word,
3450 new_lock_word,
3451 CASMode::kWeak,
3452 std::memory_order_relaxed);
3453 if (LIKELY(success)) {
3454 // The CAS succeeded.
3455 DCHECK(thread_running_gc_ != nullptr);
3456 if (LIKELY(self == thread_running_gc_)) {
3457 objects_moved_gc_thread_ += 1;
3458 bytes_moved_gc_thread_ += bytes_allocated;
3459 } else {
3460 objects_moved_.fetch_add(1, std::memory_order_relaxed);
3461 bytes_moved_.fetch_add(bytes_allocated, std::memory_order_relaxed);
3462 }
3463
3464 if (LIKELY(!fall_back_to_non_moving)) {
3465 DCHECK(region_space_->IsInToSpace(to_ref));
3466 } else {
3467 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
3468 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
3469 if (!use_generational_cc_ || !young_gen_) {
3470 // Mark it in the live bitmap.
3471 CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
3472 }
3473 if (!kUseBakerReadBarrier) {
3474 // Mark it in the mark bitmap.
3475 CHECK(!heap_->non_moving_space_->GetMarkBitmap()->AtomicTestAndSet(to_ref));
3476 }
3477 }
3478 if (kUseBakerReadBarrier) {
3479 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
3480 }
3481 DCHECK(GetFwdPtr(from_ref) == to_ref);
3482 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
3483 PushOntoMarkStack(self, to_ref);
3484 return to_ref;
3485 } else {
3486 // The CAS failed. It may have lost the race or may have failed
3487 // due to monitor/hashcode ops. Either way, retry.
3488 }
3489 }
3490 }
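// Informal summary of the forwarding protocol implemented in Copy() above:
//   1. Allocate a to-space slot (or a skipped block / non-moving-space fallback) and memcpy the
//      object body, excluding the lock word.
//   2. Issue a release fence so readers of the forwarding pointer see the copied contents, then
//      weak-CAS the from-space lock word to LockWord::FromForwardingAddress(to_ref).
//   3. On success, push to_ref on the mark stack; on losing the race, recycle the unused copy as
//      a dummy object (or free it) and return the winner's forwarding address instead.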
3491
3492 mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
3493 DCHECK(from_ref != nullptr);
3494 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
3495 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
3496 // It's already marked.
3497 return from_ref;
3498 }
3499 mirror::Object* to_ref;
3500 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
3501 to_ref = GetFwdPtr(from_ref);
3502 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
3503 heap_->non_moving_space_->HasAddress(to_ref))
3504 << "from_ref=" << from_ref << " to_ref=" << to_ref;
3505 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
3506 if (IsMarkedInUnevacFromSpace(from_ref)) {
3507 to_ref = from_ref;
3508 } else {
3509 to_ref = nullptr;
3510 }
3511 } else {
3512 // At this point, `from_ref` should not be in the region space
3513 // (i.e. within an "unused" region).
3514 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
3515 // from_ref is in a non-moving space.
3516 if (immune_spaces_.ContainsObject(from_ref)) {
3517 // An immune object is alive.
3518 to_ref = from_ref;
3519 } else {
3520 // Non-immune non-moving space. Use the mark bitmap.
3521 if (IsMarkedInNonMovingSpace(from_ref)) {
3522 // Already marked.
3523 to_ref = from_ref;
3524 } else {
3525 to_ref = nullptr;
3526 }
3527 }
3528 }
3529 return to_ref;
3530 }
3531
3532 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
3533 // TODO: Explain why this is here. What release operation does it pair with?
3534 std::atomic_thread_fence(std::memory_order_acquire);
3535 accounting::ObjectStack* alloc_stack = GetAllocationStack();
3536 return alloc_stack->Contains(ref);
3537 }
3538
3539 mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
3540 mirror::Object* ref,
3541 mirror::Object* holder,
3542 MemberOffset offset) {
3543 // ref is in a non-moving space (from_ref == to_ref).
3544 DCHECK(!region_space_->HasAddress(ref)) << ref;
3545 DCHECK(!immune_spaces_.ContainsObject(ref));
3546 // Use the mark bitmap.
3547 accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
3548 accounting::LargeObjectBitmap* los_bitmap = nullptr;
3549 const bool is_los = !mark_bitmap->HasAddress(ref);
3550 if (is_los) {
3551 if (!IsAligned<kPageSize>(ref)) {
3552 // Ref is a large object that is not aligned; it must be heap
3553 // corruption. Remove memory protection and dump data before
3554 // AtomicSetReadBarrierState since it will fault if the address is not
3555 // valid.
3556 region_space_->Unprotect();
3557 heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
3558 }
3559 DCHECK(heap_->GetLargeObjectsSpace())
3560 << "ref=" << ref
3561 << " doesn't belong to non-moving space and large object space doesn't exist";
3562 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
3563 DCHECK(los_bitmap->HasAddress(ref));
3564 }
3565 if (use_generational_cc_) {
3566 // The sticky-bit CC collector is only compatible with Baker-style read barriers.
3567 DCHECK(kUseBakerReadBarrier);
3568 // Not done scanning, use AtomicSetReadBarrierPointer.
3569 if (!done_scanning_.load(std::memory_order_acquire)) {
3570 // Since the mark bitmap is still filled in from the last GC, we cannot use it, or else the
3571 // mutator may see references to the from space. Instead, use the Baker pointer itself as
3572 // the mark bit.
3573 //
3574 // We need to avoid marking objects that are on allocation stack as that will lead to a
3575 // situation (after this GC cycle is finished) where some object(s) are on both allocation
3576 // stack and live bitmap. This leads to visiting the same object(s) twice during a heapdump
3577 // (b/117426281).
3578 if (!IsOnAllocStack(ref) &&
3579 ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
3580 // TODO: We don't actually need to scan this object later, we just need to clear the gray
3581 // bit.
3582 // We don't need to mark newly allocated objects (those in allocation stack) as they can
3583 // only point to to-space objects. Also, they are considered live till the next GC cycle.
3584 PushOntoMarkStack(self, ref);
3585 }
3586 return ref;
3587 }
3588 }
3589 if (!is_los && mark_bitmap->Test(ref)) {
3590 // Already marked.
3591 } else if (is_los && los_bitmap->Test(ref)) {
3592 // Already marked in LOS.
3593 } else if (IsOnAllocStack(ref)) {
3594 // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
3595 // Objects on the allocation stack need not be marked.
3596 if (!is_los) {
3597 DCHECK(!mark_bitmap->Test(ref));
3598 } else {
3599 DCHECK(!los_bitmap->Test(ref));
3600 }
3601 if (kUseBakerReadBarrier) {
3602 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
3603 }
3604 } else {
3605 // Not marked nor on the allocation stack. Try to mark it.
3606 // This may or may not succeed, which is ok.
3607 bool success = false;
3608 if (kUseBakerReadBarrier) {
3609 success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
3610 ReadBarrier::GrayState());
3611 } else {
3612 success = is_los ?
3613 !los_bitmap->AtomicTestAndSet(ref) :
3614 !mark_bitmap->AtomicTestAndSet(ref);
3615 }
3616 if (success) {
3617 if (kUseBakerReadBarrier) {
3618 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
3619 }
3620 PushOntoMarkStack(self, ref);
3621 }
3622 }
3623 return ref;
3624 }
3625
3626 void ConcurrentCopying::FinishPhase() {
3627 Thread* const self = Thread::Current();
3628 {
3629 MutexLock mu(self, mark_stack_lock_);
3630 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
3631 }
3632 // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
3633 // positives.
3634 if (!kVerifyNoMissingCardMarks && !use_generational_cc_) {
3635 TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
3636 // We do not currently use the region space cards at all, madvise them away to save ram.
3637 heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
3638 } else if (use_generational_cc_ && !young_gen_) {
3639 region_space_inter_region_bitmap_->Clear();
3640 non_moving_space_inter_region_bitmap_->Clear();
3641 }
3642 {
3643 MutexLock mu(self, skipped_blocks_lock_);
3644 skipped_blocks_map_.clear();
3645 }
3646 {
3647 ReaderMutexLock mu(self, *Locks::mutator_lock_);
3648 {
3649 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3650 heap_->ClearMarkedObjects();
3651 }
3652 if (kUseBakerReadBarrier && kFilterModUnionCards) {
3653 TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
3654 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3655 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
3656 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
3657 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
3658 // Filter out cards that don't need to be set.
3659 if (table != nullptr) {
3660 table->FilterCards();
3661 }
3662 }
3663 }
3664 if (kUseBakerReadBarrier) {
3665 TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
3666 DCHECK(rb_mark_bit_stack_ != nullptr);
3667 const auto* limit = rb_mark_bit_stack_->End();
3668 for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
3669 CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0))
3670 << "rb_mark_bit_stack_->Begin()" << rb_mark_bit_stack_->Begin() << '\n'
3671 << "rb_mark_bit_stack_->End()" << rb_mark_bit_stack_->End() << '\n'
3672 << "rb_mark_bit_stack_->IsFull()"
3673 << std::boolalpha << rb_mark_bit_stack_->IsFull() << std::noboolalpha << '\n'
3674 << DumpReferenceInfo(it->AsMirrorPtr(), "*it");
3675 }
3676 rb_mark_bit_stack_->Reset();
3677 }
3678 }
3679 if (measure_read_barrier_slow_path_) {
3680 MutexLock mu(self, rb_slow_path_histogram_lock_);
3681 rb_slow_path_time_histogram_.AdjustAndAddValue(
3682 rb_slow_path_ns_.load(std::memory_order_relaxed));
3683 rb_slow_path_count_total_ += rb_slow_path_count_.load(std::memory_order_relaxed);
3684 rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.load(std::memory_order_relaxed);
3685 }
3686 }
3687
3688 bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
3689 bool do_atomic_update) {
3690 mirror::Object* from_ref = field->AsMirrorPtr();
3691 if (from_ref == nullptr) {
3692 return true;
3693 }
3694 mirror::Object* to_ref = IsMarked(from_ref);
3695 if (to_ref == nullptr) {
3696 return false;
3697 }
3698 if (from_ref != to_ref) {
3699 if (do_atomic_update) {
3700 do {
3701 if (field->AsMirrorPtr() != from_ref) {
3702 // Concurrently overwritten by a mutator.
3703 break;
3704 }
3705 } while (!field->CasWeakRelaxed(from_ref, to_ref));
3706 } else {
3707 // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
3708 field->Assign</* kIsVolatile= */ true>(to_ref);
3709 }
3710 }
3711 return true;
3712 }
3713
3714 mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
3715 return Mark(Thread::Current(), from_ref);
3716 }
3717
3718 void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
3719 ObjPtr<mirror::Reference> reference) {
3720 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
3721 }
3722
3723 void ConcurrentCopying::ProcessReferences(Thread* self) {
3724 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
3725 // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
3726 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3727 GetHeap()->GetReferenceProcessor()->ProcessReferences(
3728 /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
3729 }
3730
3731 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
3732 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
3733 region_space_->RevokeAllThreadLocalBuffers();
3734 }
3735
3736 mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* const self,
3737 mirror::Object* from_ref) {
3738 if (self != thread_running_gc_) {
3739 rb_slow_path_count_.fetch_add(1u, std::memory_order_relaxed);
3740 } else {
3741 rb_slow_path_count_gc_.fetch_add(1u, std::memory_order_relaxed);
3742 }
3743 ScopedTrace tr(__FUNCTION__);
3744 const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
3745 mirror::Object* ret =
3746 Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
3747 from_ref);
3748 if (measure_read_barrier_slow_path_) {
3749 rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
3750 }
3751 return ret;
3752 }
3753
3754 void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
3755 GarbageCollector::DumpPerformanceInfo(os);
3756 size_t num_gc_cycles = GetCumulativeTimings().GetIterations();
3757 MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
3758 if (rb_slow_path_time_histogram_.SampleSize() > 0) {
3759 Histogram<uint64_t>::CumulativeData cumulative_data;
3760 rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
3761 rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
3762 }
3763 if (rb_slow_path_count_total_ > 0) {
3764 os << "Slow path count " << rb_slow_path_count_total_ << "\n";
3765 }
3766 if (rb_slow_path_count_gc_total_ > 0) {
3767 os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
3768 }
3769
3770 os << "Average " << (young_gen_ ? "minor" : "major") << " GC reclaim bytes ratio "
3771 << (reclaimed_bytes_ratio_sum_ / num_gc_cycles) << " over " << num_gc_cycles
3772 << " GC cycles\n";
3773
3774 os << "Average " << (young_gen_ ? "minor" : "major") << " GC copied live bytes ratio "
3775 << (copied_live_bytes_ratio_sum_ / gc_count_) << " over " << gc_count_
3776 << " " << (young_gen_ ? "minor" : "major") << " GCs\n";
3777
3778 os << "Cumulative bytes moved "
3779 << cumulative_bytes_moved_.load(std::memory_order_relaxed) << "\n";
3780 os << "Cumulative objects moved "
3781 << cumulative_objects_moved_.load(std::memory_order_relaxed) << "\n";
3782
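// Illustrative reading of the output below, assuming the usual 256 KiB region size: a peak of
// 300 non-free regions out of 1024 total would print as "300 (75MB) / 512 (128MB)", where the
// denominator is GetNumRegions() / 2.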
3783 os << "Peak regions allocated "
3784 << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
3785 << PrettySize(region_space_->GetMaxPeakNumNonFreeRegions() * space::RegionSpace::kRegionSize)
3786 << ") / " << region_space_->GetNumRegions() / 2 << " ("
3787 << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize / 2)
3788 << ")\n";
3789 }
3790
3791 } // namespace collector
3792 } // namespace gc
3793 } // namespace art
3794