/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"
#include "mirror/object-readbarrier-inl.h"

namespace art {
namespace gc {
namespace collector {

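// Mark an object that lives in an unevacuated from-space region. The object is not copied; it is
// marked in place via the region space bitmap (and, with the Baker read barrier, by graying its
// read barrier state) and is pushed onto the mark stack if it was newly marked. Called from
// Mark() for kRegionTypeUnevacFromSpace regions with the region space bitmap.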
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
  // to gray even though the object has already been marked through. This happens if a mutator
  // thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
  // object (changes it from white to gray and back to white), and the thread runs and
  // incorrectly changes it from white to gray. If this happens, the object will get added to the
  // mark stack again and get changed back to white after it is processed.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first; most of the time this avoids graying an object that has already
    // been marked through.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is ok because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // The GC marks the bitmap when popping from the mark stack. If only the GC is touching the
    // bitmap, we can avoid an expensive CAS here.
    // For the Baker case, an object is marked if either the mark bit is set or the bitmap bit is
    // set.
    success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
  } else {
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

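// Mark an object in an immune space. With the Baker read barrier, the object may be grayed and
// pushed onto the immune gray stack; once all immune space objects have been updated
// (updated_all_immune_objects_), or when kGrayImmuneObject is false, the reference is returned
// unchanged. Without the Baker read barrier this is a no-op.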
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is ok because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
                                                  ReadBarrier::GrayState());
    if (success) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

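// Mark from_ref and return its marked (possibly relocated) address. Dispatches on the region type
// of from_ref: to-space objects are already marked, from-space objects are copied to the to-space
// (or their existing forwarding address is returned), unevacuated from-space objects are marked in
// place, and objects outside the region space are handled as immune or non-moving.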
template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
                                               mirror::Object* holder,
                                               MemberOffset offset) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(Thread::Current(), thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copies of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forwarding
    // address state (to store the forwarding/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look gray but aren't really, the
    // read barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref, holder, offset);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
      return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
    }
    case space::RegionSpace::RegionType::kRegionTypeNone:
      if (immune_spaces_.ContainsObject(from_ref)) {
        return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
      } else {
        return MarkNonMoving(from_ref, holder, offset);
      }
    default:
      UNREACHABLE();
  }
}

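// Entry point for the read barrier slow path. Marks from_ref (optionally recording timing
// measurements) and, for the Baker read barrier, sets the mark bit on the result and records it on
// rb_mark_bit_stack_, clearing the bit again if that stack is full.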
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  if (from_ref == nullptr) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(from_ref);
  } else {
    ret = Mark(from_ref);
  }
  // Only set the mark bit for the Baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark stack is full, we may temporarily go to marked and back to unmarked. Seeing
    // either value is OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // The mark stack is full; set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_. This is racy but OK since AtomicPushBack is thread safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

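// Return the to-space forwarding address stored in from_ref's lock word, or nullptr if from_ref
// has not been forwarded yet.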
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

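// Return true if an object in an unevacuated from-space region has already been marked, either via
// a gray read barrier state (Baker only) or via the region space bitmap.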
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use load-acquire on the read barrier state to ensure that we never see a white read barrier
  // state with an unmarked bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
    return true;
  }
  return region_space_bitmap_->Test(from_ref);
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_