/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space-inl.h"
#include "gc/verification.h"
#include "lock_word.h"
#include "mirror/class.h"
#include "mirror/object-readbarrier-inl.h"

namespace art {
namespace gc {
namespace collector {

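// Marks a reference that lives in an unevacuated from-space region: depending on the
// configuration, either by graying it via its read barrier state or by setting its bit in
// `bitmap`. Objects marked here for the first time are pushed onto the mark stack.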
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    Thread* const self,
    mirror::Object* ref,
    accounting::ContinuousSpaceBitmap* bitmap) {
  if (use_generational_cc_ && !done_scanning_.load(std::memory_order_acquire)) {
    // Everything in the unevac space should be marked for young generation CC,
    // except for large objects.
    DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
        << ref << " "
        << ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
    // Since the mark bitmap is still filled in from the last GC (or from the marking phase of
    // 2-phase CC), we cannot use it, or else the mutator may see references to the from-space.
    // Instead, use the Baker pointer itself as the mark bit.
    if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
      // TODO: We don't actually need to scan this object later, we just need to clear the gray
      // bit.
      // TODO: We could also set the mark bit here for "free" since this case comes from the
      // read barrier.
      PushOntoMarkStack(self, ref);
    }
    DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    return ref;
  }
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from non-gray
  // (black) to gray even though the object has already been marked through. This happens if a
  // mutator thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
  // object (changes it from non-gray (white) to gray and back to non-gray (black)), and the thread
  // runs and incorrectly changes it from non-gray (black) to gray. If this happens, the object
  // will get added to the mark stack again and get changed back to non-gray (black) after it is
  // processed.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first to avoid graying an object that has already been marked through most
    // of the time.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is ok because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // GC will mark the bitmap when popping from the mark stack. If only the GC is touching the
    // bitmap, we can avoid an expensive CAS.
    // For the Baker case, an object is marked if either its mark bit or its bitmap bit is set.
    success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
                                             /* rb_state= */ ReadBarrier::GrayState());
  } else {
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    }
    PushOntoMarkStack(self, ref);
  }
  return ref;
}

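// Marks a reference that lives in an immune space. With the Baker read barrier, the object is
// grayed and recorded on immune_gray_stack_ only while kGrayImmuneObject is set and the GC has
// not yet updated all immune-space objects; otherwise the reference is returned unchanged.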
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(Thread* const self,
                                                          mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (self == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.load(std::memory_order_relaxed) ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.load(std::memory_order_relaxed)) {
      return ref;
    }
    // This may or may not succeed, which is ok because the object may already be gray.
    bool success =
        ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
                                       /* rb_state= */ ReadBarrier::GrayState());
    if (success) {
      MutexLock mu(self, immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

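// Slow-path marking of `from_ref`. Dispatches on the space the reference lives in (to-space,
// from-space, unevacuated from-space, immune, or non-moving) and returns the corresponding
// to-space reference. `holder` and `offset` identify the field that held `from_ref`, e.g. for
// heap-corruption diagnostics.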
template<bool kGrayImmuneObject, bool kNoUnEvac, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(Thread* const self,
                                               mirror::Object* from_ref,
                                               mirror::Object* holder,
                                               MemberOffset offset) {
  // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
  DCHECK_IMPLIES(kNoUnEvac, use_generational_cc_);
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(self, thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copies of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forwarding
    // address state (to store the forwarding/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look gray but aren't really, the
    // read barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  if (region_space_->HasAddress(from_ref)) {
    space::RegionSpace::RegionType rtype = region_space_->GetRegionTypeUnsafe(from_ref);
    switch (rtype) {
      case space::RegionSpace::RegionType::kRegionTypeToSpace:
        // It's already marked.
        return from_ref;
      case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
        mirror::Object* to_ref = GetFwdPtr(from_ref);
        if (to_ref == nullptr) {
          // It isn't marked yet. Mark it by copying it to the to-space.
          to_ref = Copy(self, from_ref, holder, offset);
        }
        // The copy should either be in a to-space region, or in the
        // non-moving space, if it could not fit in a to-space region.
        DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
            << "from_ref=" << from_ref << " to_ref=" << to_ref;
        return to_ref;
      }
      case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
        if (kNoUnEvac && use_generational_cc_ && !region_space_->IsLargeObject(from_ref)) {
          if (!kFromGCThread) {
            DCHECK(IsMarkedInUnevacFromSpace(from_ref)) << "Returning unmarked object to mutator";
          }
          return from_ref;
        }
        return MarkUnevacFromSpaceRegion(self, from_ref, region_space_bitmap_);
      default:
        // The reference is in an unused region. Remove memory protection from
        // the region space and log debugging information.
        region_space_->Unprotect();
        LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
        region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
        heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
        UNREACHABLE();
    }
  } else {
    if (immune_spaces_.ContainsObject(from_ref)) {
      return MarkImmuneSpace<kGrayImmuneObject>(self, from_ref);
    } else {
      return MarkNonMoving(self, from_ref, holder, offset);
    }
  }
}

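// Entry point for the read barrier slow path. Returns `from_ref` unchanged when marking is not in
// progress; otherwise marks it via Mark() (or the measuring variant) and, with the Baker read
// barrier, also sets the returned object's mark bit and records it on rb_mark_bit_stack_.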
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  Thread* const self = Thread::Current();
  // We can get here before marking starts since we gray immune objects before the marking phase.
  if (from_ref == nullptr || !self->GetIsGcMarking()) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
  } else {
    ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
                                                                                         from_ref);
  }
  // Only set the mark bit for the Baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark stack is full, an object may temporarily go to marked and back to unmarked.
    // Seeing either value is OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // The mark stack is full; set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_. This is racy but OK since AtomicPushBack is thread safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

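// Returns the forwarding address stored in `from_ref`'s lock word, or null if the object has not
// been forwarded yet. Does not check which space `from_ref` belongs to.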
inline mirror::Object* ConcurrentCopying::GetFwdPtrUnchecked(mirror::Object* from_ref) {
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

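// Same as GetFwdPtrUnchecked(), but additionally checks (in debug builds) that `from_ref` is in
// the from-space.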
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  return GetFwdPtrUnchecked(from_ref);
}

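// Returns true if `from_ref`, which must be in an unevacuated from-space region, is considered
// marked: either its read barrier state is gray, or (once the card-table scan of generational CC
// is done, or generational CC is disabled) its bit is set in the region space mark bitmap.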
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use load-acquire on the read barrier pointer to ensure that we never see a non-gray (black)
  // read barrier state with an unmarked bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
    return true;
  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
    // If the card table scanning is not finished yet, then only the read-barrier
    // state should be checked. Checking the mark bitmap is unreliable as there
    // may be some objects - whose corresponding card is dirty - which are
    // marked in the mark bitmap, but cannot be considered marked unless their
    // read-barrier state is set to gray.
    //
    // Why read the read-barrier state before checking done_scanning_?
    // If the read-barrier state were read *after* done_scanning_, there would
    // be a race in which this function returns false even though the object
    // has already been marked. The following scenario shows how:
    //
    // 1. Mutator thread reads done_scanning_ and, upon finding it false, gets
    //    suspended before reading the object's read-barrier state.
    // 2. GC thread finishes the card-table scan and then sets done_scanning_ to
    //    true.
    // 3. GC thread grays the object, scans it, marks it in the bitmap, and then
    //    changes its read-barrier state back to non-gray.
    // 4. Mutator thread resumes, reads the object's (now non-gray) read-barrier
    //    state and returns false.
    return region_space_bitmap_->Test(from_ref);
  }
  return false;
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_