/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "base/utils.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

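// Read barrier for a heap reference field of `obj` at `offset`; returns the referent, marking it
// through the GC slow path when required by the configured barrier type (Baker, Brooks, or
// table-lookup). With kAlwaysUpdateField, the field itself is also updated to the marked value.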
template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if a
        // mutator updates it before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->template AsMirrorPtr<kIsVolatile>();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if a mutator updates it before us,
        // but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}

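// Read barrier for a GC root stored as a raw MirrorType* slot. When the GC is marking, the
// referent is marked and the root slot may be updated in place to the marked reference.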
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if a mutator updates it before us,
        // but that is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

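// Read barrier for a GC root stored as a CompressedReference<MirrorType>. The table-lookup path
// compares and swaps the compressed form of the reference rather than a raw pointer.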
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if a mutator updates it before us,
      // but that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

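// If the concurrent copying GC is currently marking, returns the result of the collector's
// IsMarked() for `ref`; otherwise (no read barrier, null ref, or GC not marking) returns `ref`.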
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

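// Returns true while the runtime is still starting up, i.e. before the heap and the concurrent
// copying collector are available (or when CC is not the current collector type), so that the
// to-space invariant checks below can be skipped.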
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

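// When to-space invariant checks are enabled, asserts (via the concurrent copying collector)
// that `ref`, read from the field of `obj` at `offset`, satisfies the to-space invariant.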
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

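// Same as above, but for a reference read from a GC root rather than from an object field.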
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

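// Read barrier slow path: marks `obj` through the concurrent copying collector and returns the
// marked reference.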
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

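// Returns whether `obj` is gray, i.e. its read barrier state equals gray_state_. Also produces
// the (always zero) fake address dependency that Barrier() folds into the field address to order
// the subsequent reference load without a load-load barrier.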
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == gray_state_;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_