/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

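// Read barrier for a heap reference field load: returns the to-space version of the reference
// stored at obj.field(offset), applying the configured barrier (Baker or table-lookup). When no
// read barrier is configured, this is a plain field load.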
template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
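      // Table-lookup barrier: if the read barrier table entry covering the reference is set,
      // mark the reference and try to store the to-space pointer back into the field.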
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates the field before us,
        // but that is OK.
        if (ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}

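// Read barrier for a GC root held as a raw MirrorType* pointer. Returns the to-space reference;
// with the table-lookup configuration the root slot itself may also be updated in place.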
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates the root before us,
        // but that is OK.
        if (ref != old_ref) {
          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

// TODO: Reduce copy-paste between the two BarrierForRoot overloads.
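// Read barrier for a GC root held as a compressed reference, mirroring the raw-pointer overload
// above.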
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        // Update the root atomically. This may fail if the mutator updates the root before us,
        // but that is OK.
        if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
          auto* atomic_root =
              reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

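// Returns |ref| unchanged when no read barrier is configured, when |ref| is null, or when the GC
// is not currently marking; otherwise forwards the query to the concurrent copying collector's
// IsMarked().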
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

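// Returns true if the heap or the concurrent copying collector is not yet initialized, or if CC
// is not the current collector type. Used below to skip the to-space invariant checks.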
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

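// Debug-only check (gated on kEnableToSpaceInvariantChecks) that a reference loaded from
// obj.field(offset) satisfies the concurrent copying collector's to-space invariant.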
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

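// Same check as above, but for a reference loaded from a GC root rather than an object field.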
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

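// Slow path of the read barrier: asks the concurrent copying collector to mark |obj| and returns
// the resulting to-space reference.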
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

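// Returns true if |obj| is in the gray read barrier state. *fake_address_dependency is set to a
// value that is always zero but carries a data dependency on the state load, so callers can OR
// it into the field address to order the two loads without an explicit barrier (see Barrier()).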
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
}

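// Variant without the fake address dependency: uses a load-acquire of the read barrier state to
// order it before subsequent loads.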
inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == kGrayState;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_