/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "art_field-inl.h"
#include "base/mutex.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "collector/garbage_collector.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "thread-inl.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

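// Returns the offset of the static "slowPathEnabled" boolean on java.lang.ref.Reference. It is
// toggled via SetSlowPathFlag() below; while set, Reference.get() falls back to the runtime's
// GetReferent() slow path.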
static inline MemberOffset GetSlowPathFlagOffset(ObjPtr<mirror::Class> reference_class)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(reference_class == GetClassRoot<mirror::Reference>());
  // Second static field
  ArtField* field = reference_class->GetStaticField(1);
  DCHECK_STREQ(field->GetName(), "slowPathEnabled");
  return field->GetOffset();
}

static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
  MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
  reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
      slow_path_offset, enabled ? 1 : 0);
}

void ReferenceProcessor::EnableSlowPath() {
  SetSlowPathFlag(/* enabled= */ true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  SetSlowPathFlag(/* enabled= */ false);
  condition_.Broadcast(self);
}

bool ReferenceProcessor::SlowPathEnabled() {
  ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
  MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
  return reference_class->GetFieldBoolean(slow_path_offset);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  auto slow_path_required = [this, self]() REQUIRES_SHARED(Locks::mutator_lock_) {
    return gUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled();
  };
  if (!slow_path_required()) {
    return reference->GetReferent();
  }
  // If the referent is null then it is already cleared; we can just return null since there is no
  // scenario where it becomes non-null during the reference processing phase.
  // A read barrier may be unsafe here, and we use the result only when it's null or marked.
  ObjPtr<mirror::Object> referent = reference->template GetReferent<kWithoutReadBarrier>();
  if (referent.IsNull()) {
    return referent;
  }

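  // If we end up blocking below, emit a trace section and warn when the wait is long enough to
  // risk visible jank.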
  bool started_trace = false;
  uint64_t start_millis;
  auto finish_trace = [](uint64_t start_millis) {
    ATraceEnd();
    uint64_t millis = MilliTime() - start_millis;
    static constexpr uint64_t kReportMillis = 10;  // Long enough to risk dropped frames.
    if (millis > kReportMillis) {
      LOG(WARNING) << "Weak pointer dereference blocked for " << millis << " milliseconds.";
    }
  };

  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Keeping reference_processor_lock_ blocks the broadcast when we try to reenable the fast path.
  while (slow_path_required()) {
    DCHECK(collector_ != nullptr);
    const bool other_read_barrier = !kUseBakerReadBarrier && gUseReadBarrier;
    if (UNLIKELY(reference->IsFinalizerReferenceInstance()
                 || rp_state_ == RpState::kStarting /* too early to determine mark state */
                 || (other_read_barrier && reference->IsPhantomReferenceInstance()))) {
      // Odd cases in which it doesn't hurt to just wait, or the wait is likely to be very brief.

      // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
      // presence of threads blocking for weak ref access.
      self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
      if (!started_trace) {
        ATraceBegin("GetReferent blocked");
        started_trace = true;
        start_millis = MilliTime();
      }
      condition_.WaitHoldingLocks(self);
      continue;
    }
    DCHECK(!reference->IsPhantomReferenceInstance());

    if (rp_state_ == RpState::kInitClearingDone) {
      // Reachable references have their final referent values.
      break;
    }
    // Although reference processing is not done, we can always predict the correct return value
    // based on the current mark state. No additional marking from finalizers has been done, since
    // we hold reference_processor_lock_, which is required to advance to kInitClearingDone.
    DCHECK(rp_state_ == RpState::kInitMarkingDone);
    // Re-load and re-check referent, since the current one may have been read before we acquired
    // reference_processor_lock_. In particular a Reference.clear() call may have intervened.
    // (b/33569625)
    referent = reference->GetReferent<kWithoutReadBarrier>();
    ObjPtr<mirror::Object> forwarded_ref =
        referent.IsNull() ? nullptr : collector_->IsMarked(referent.Ptr());
    // Either the referent was marked, and forwarded_ref is the correct return value, or it
    // was not, and forwarded_ref == null, which is again the correct return value.
    if (started_trace) {
      finish_trace(start_millis);
    }
    return forwarded_ref;
  }
  if (started_trace) {
    finish_trace(start_millis);
  }
  return reference->GetReferent();
}

// Forward SoftReferences. Can be done before we disable Reference access. Only
// invoked if we are not clearing SoftReferences.
uint32_t ReferenceProcessor::ForwardSoftReferences(TimingLogger* timings) {
  TimingLogger::ScopedTiming split(
      concurrent_ ? "ForwardSoftReferences" : "(Paused)ForwardSoftReferences", timings);
  // We used to argue that we should be smarter about doing this conditionally, but it's unclear
  // that's actually better than the more predictable strategy of basically only clearing
  // SoftReferences just before we would otherwise run out of memory.
  uint32_t non_null_refs = soft_reference_queue_.ForwardSoftReferences(collector_);
  if (ATraceEnabled()) {
    static constexpr size_t kBufSize = 80;
    char buf[kBufSize];
    snprintf(buf, kBufSize, "Marking for %" PRIu32 " SoftReferences", non_null_refs);
    ATraceBegin(buf);
    collector_->ProcessMarkStack();
    ATraceEnd();
  } else {
    collector_->ProcessMarkStack();
  }
  return non_null_refs;
}

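// Record the collector and the per-collection flags before reference processing for this GC cycle
// begins.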
void ReferenceProcessor::Setup(Thread* self,
                               collector::GarbageCollector* collector,
                               bool concurrent,
                               bool clear_soft_references) {
  DCHECK(collector != nullptr);
  MutexLock mu(self, *Locks::reference_processor_lock_);
  collector_ = collector;
  rp_state_ = RpState::kStarting;
  concurrent_ = concurrent;
  clear_soft_references_ = clear_soft_references;
}

// Process reference class instances and schedule finalizations.
// We advance rp_state_ to signal partial completion for the benefit of GetReferent.
void ReferenceProcessor::ProcessReferences(Thread* self, TimingLogger* timings) {
  TimingLogger::ScopedTiming t(concurrent_ ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  if (!clear_soft_references_) {
    // Forward any additional SoftReferences we discovered late, now that reference access has been
    // inhibited.
    while (!soft_reference_queue_.IsEmpty()) {
      ForwardSoftReferences(timings);
    }
  }
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    if (!gUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent_) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent_ == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent_);
    }
    DCHECK(rp_state_ == RpState::kStarting);
    rp_state_ = RpState::kInitMarkingDone;
    condition_.Broadcast(self);
  }
  if (kIsDebugBuild && collector_->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Clear all remaining soft and weak references with white referents.
  // This misses references only reachable through finalizers.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_);
  // Defer PhantomReference processing until we've finished marking through finalizers.
  {
    // TODO: Capture mark state of some system weaks here. If the referent was marked here,
    // then it is now safe to return, since it can only refer to marked objects. If it becomes
    // marked below, that is no longer guaranteed.
    MutexLock mu(self, *Locks::reference_processor_lock_);
    rp_state_ = RpState::kInitClearingDone;
    // At this point, all mutator-accessible data is marked (black). Objects enqueued for
    // finalization will only be made available to the mutator via CollectClearedReferences after
    // we're fully done marking. Soft and WeakReferences accessible to the mutator have been
    // processed and refer only to black objects. Thus there is no danger of the mutator getting
    // access to non-black objects. Weak reference processing is still nominally suspended, but
    // many kinds of references, including all java.lang.ref ones, are handled normally from here
    // on. See GetReferent().
  }
  {
    TimingLogger::ScopedTiming t2(
        concurrent_ ? "EnqueueFinalizerReferences" : "(Paused)EnqueueFinalizerReferences", timings);
    // Preserve all white objects with finalize methods and schedule them for finalization.
    FinalizerStats finalizer_stats =
        finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector_);
    if (ATraceEnabled()) {
      static constexpr size_t kBufSize = 80;
      char buf[kBufSize];
      snprintf(buf, kBufSize, "Marking from %" PRIu32 " / %" PRIu32 " finalizers",
               finalizer_stats.num_enqueued_, finalizer_stats.num_refs_);
      ATraceBegin(buf);
      collector_->ProcessMarkStack();
      ATraceEnd();
    } else {
      collector_->ProcessMarkStack();
    }
  }

  // Process all soft and weak references with white referents, where the references are reachable
  // only from finalizers. It is unclear that there is any way to do this without slightly
  // violating some language spec. We choose to apply normal Reference processing rules for these.
  // This exposes the following issues:
  // 1) In the case of an unmarked referent, we may end up enqueuing an "unreachable" reference.
  //    This appears unavoidable, since we need to clear the reference for safety, unless we
  //    mark the referent and undo finalization decisions for objects we encounter during marking.
  //    (Some versions of the RI seem to do something along these lines.)
  //    Or we could clear the reference without enqueuing it, which also seems strange and
  //    unhelpful.
  // 2) In the case of a marked referent, we will preserve a reference to objects that may have
  //    been enqueued for finalization. Again fixing this would seem to involve at least undoing
  //    previous finalization / reference clearing decisions. (This would also mean that an object
  //    containing both a strong and a WeakReference to the same referent could see the
  //    WeakReference cleared.)
  // The treatment in (2) is potentially quite dangerous, since Reference.get() can e.g. return a
  // finalized object containing pointers to native objects that have already been deallocated.
  // But it can be argued that this is just an instance of the broader rule that it is not safe
  // for finalizers to access otherwise inaccessible finalizable objects.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_,
                                             /*report_cleared=*/ true);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_,
                                             /*report_cleared=*/ true);

  // Clear all phantom references with white referents. It's fine to do this just once here.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_);

  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());

  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only in the
    // concurrent case could result in a stale is_marked_callback_ being called before the
    // reference processing starts, since there is a small window of time where slow_path_enabled_
    // is set but the callback isn't yet installed.
    if (!gUseReadBarrier && concurrent_) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and skip reference processing entirely, so
      // that we never need to roll back reference-processing decisions. do_atomic_update needs to
      // be true because this happens outside of the reference processing phase.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

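// HeapTask that hands a chain of cleared references to the managed side by calling
// java.lang.ref.ReferenceQueue.add().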
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  void Run(Thread* thread) override {
    ScopedObjectAccess soa(thread);
    WellKnownClasses::java_lang_ref_ReferenceQueue_add->InvokeStatic<'V', 'L'>(
        thread, soa.Decode<mirror::Object>(cleared_references_));
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

SelfDeletingTask* ReferenceProcessor::CollectClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // By default we don't actually need to do anything. Just return this no-op task to avoid having
  // to put in ifs.
  std::unique_ptr<SelfDeletingTask> result(new FunctionTask([](Thread*) {}));
  // When a runtime isn't started there are no reference queues to care about so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->GetVm()->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        result.reset(new ClearedReferenceTask(cleared_references));
      }
    }
    cleared_references_.Clear();
  }
  return result.release();
}

void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
  // This also handles the race where the referent gets cleared after a null check but before
  // IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!gUseReadBarrier && SlowPathEnabled()) ||
         (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

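// If the finalizer reference has not yet been enqueued by the GC, make its pendingNext point to
// itself (a one-element circular list) and return true; otherwise return false.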
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
  // phase. Since we are holding the reference processor lock, it guarantees that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we hold the finalizer_reference_queue_ lock, that race is also prevented.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art