/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

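// Read barrier for a heap reference field of `obj` at `offset`; `ref_addr` is the address of that
// field. Depending on the configured barrier kind (Baker, Brooks, or table-lookup), this may mark
// the referent through the concurrent copying collector and CAS the updated pointer back into the
// field, so the caller always gets a reference that is safe to use while the GC is running.
// A minimal usage sketch, assuming `addr` already points at the reference field inside `obj`
// (illustrative only, not an actual call site in this file):
//
//   mirror::HeapReference<mirror::Object>* addr = ...;  // Address of the field inside obj.
//   mirror::Object* ref =
//       ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField= */ false>(
//           obj, offset, addr);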
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if a mutator
        // updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->AsMirrorPtr();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if a mutator updates the field before us,
        // but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

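// Read barrier for a GC root stored at `root`. Unlike the field barrier above, marking is only
// attempted while the GC is marking (self->GetIsGcMarking()); `gc_root_source` is only used for
// the to-space invariant assertion.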
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if a mutator updates it before us, but that
        // is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

// TODO: Reduce copy paste
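// Same as the root barrier above, but for a root stored as a mirror::CompressedReference; in the
// table-lookup configuration the updated value is CASed back as a compressed reference.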
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if a mutator updates it before us, but that is
      // OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

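// Returns ref's marked (to-space) address if the concurrent copying collector has already marked
// it, null if it has not, and ref unchanged when read barriers are disabled or the GC is not
// currently marking.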
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

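// Returns true while the runtime is still starting up: the heap may be null, CC may not be the
// current collector, or the concurrent copying collector may not exist yet. Callers use this to
// skip checks that would dereference them.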
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

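// Debug-only check (guarded by kEnableToSpaceInvariantChecks): asserts that `ref`, read from the
// field of `obj` at `offset`, satisfies the concurrent copying to-space invariant. Skipped for
// null references and during startup.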
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

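// As above, but for a reference read from a GC root described by `gc_root_source`.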
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

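// Slow path of the read barrier: marks `obj` via the concurrent copying collector and returns its
// to-space address.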
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

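// Returns true if `obj` is gray, i.e. needs the read barrier slow path. Also produces
// `*fake_address_dependency` (always zero) so callers can chain an artificial address dependency
// onto the subsequent reference load instead of issuing a load-load barrier.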
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == gray_state_;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_