/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "art_field-inl.h"
#include "base/mutex.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "collector/garbage_collector.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "thread-inl.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace art HIDDEN {
namespace gc {

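// When true, the task that enqueues cleared references on their java.lang.ref.ReferenceQueue is
// posted to the heap's TaskProcessor and runs asynchronously; when false (the default),
// CollectClearedReferences() returns the task for its caller to run. See
// CollectClearedReferences() below.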
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

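// Returns the offset of Reference's static slowPathEnabled field, retrieved here as the last of
// the class's fields. Reference.get() consults this flag to decide whether to take the slow path.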
static inline MemberOffset GetSlowPathFlagOffset(ObjPtr<mirror::Class> reference_class)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(reference_class == GetClassRoot<mirror::Reference>());
  // Don't use WellKnownClasses here as it may not be initialized at the point
  // we're being called.
  ArtField* field = reference_class->GetField(reference_class->NumFields() - 1);
  DCHECK(field->IsStatic());
  DCHECK_STREQ(field->GetName(), "slowPathEnabled");
  return field->GetOffset();
}

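// Set or clear the slowPathEnabled flag directly on the Reference class. Transaction checks are
// deliberately skipped: this is GC bookkeeping, not an application write.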
static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
  MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
  reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
      slow_path_offset, enabled ? 1 : 0);
}

void ReferenceProcessor::EnableSlowPath() {
  SetSlowPathFlag(/* enabled= */ true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  SetSlowPathFlag(/* enabled= */ false);
  condition_.Broadcast(self);
}

bool ReferenceProcessor::SlowPathEnabled() {
  ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
  MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
  return reference_class->GetFieldBoolean(slow_path_offset);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

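// Slow path of Reference.get(): return the correct referent even while reference processing is in
// progress, blocking only in the rare cases where the answer cannot yet be determined.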
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
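  // The slow path is signaled per-thread by disabling weak ref access when a read barrier is in
  // use, and by the class-level slowPathEnabled flag otherwise.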
  auto slow_path_required = [this, self]() REQUIRES_SHARED(Locks::mutator_lock_) {
    return gUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled();
  };
  if (!slow_path_required()) {
    return reference->GetReferent();
  }
  // If the referent is null then it is already cleared, so we can just return null: there is no
  // scenario in which it becomes non-null during the reference processing phase.
  // A read barrier may be unsafe here, and we use the result only when it's null or marked.
  ObjPtr<mirror::Object> referent = reference->template GetReferent<kWithoutReadBarrier>();
  if (referent.IsNull()) {
    return referent;
  }

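  // If we end up blocking below, trace the wait and warn when the block is long enough to risk
  // dropped frames (see finish_trace).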
  bool started_trace = false;
  uint64_t start_millis;
  auto finish_trace = [](uint64_t start_millis) {
    ATraceEnd();
    uint64_t millis = MilliTime() - start_millis;
    static constexpr uint64_t kReportMillis = 10;  // Long enough to risk dropped frames.
    if (millis > kReportMillis) {
      LOG(WARNING) << "Weak pointer dereference blocked for " << millis << " milliseconds.";
    }
  };

  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Holding reference_processor_lock_ keeps the broadcast issued when the fast path is re-enabled
  // from slipping in between our check and our wait below.
  while (slow_path_required()) {
    DCHECK(collector_ != nullptr);
    const bool other_read_barrier = !kUseBakerReadBarrier && gUseReadBarrier;
    if (UNLIKELY(reference->IsFinalizerReferenceInstance()
                 || rp_state_ == RpState::kStarting  /* too early to determine mark state */
                 || (other_read_barrier && reference->IsPhantomReferenceInstance()))) {
      // Odd cases in which it doesn't hurt to just wait, or the wait is likely to be very brief.

      // Check and run the empty checkpoint before blocking so the empty checkpoint will work in
      // the presence of threads blocking for weak ref access.
      self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
      if (!started_trace) {
        ATraceBegin("GetReferent blocked");
        started_trace = true;
        start_millis = MilliTime();
      }
      condition_.WaitHoldingLocks(self);
      continue;
    }
    DCHECK(!reference->IsPhantomReferenceInstance());

    if (rp_state_ == RpState::kInitClearingDone) {
      // Reachable references have their final referent values.
      break;
    }
    // Although reference processing is not done, we can always predict the correct return value
    // based on the current mark state. No additional marking from finalizers has been done, since
    // we hold reference_processor_lock_, which is required to advance to kInitClearingDone.
    DCHECK(rp_state_ == RpState::kInitMarkingDone);
    // Re-load and re-check the referent, since the current one may have been read before we
    // acquired reference_processor_lock_. In particular a Reference.clear() call may have
    // intervened. (b/33569625)
    referent = reference->GetReferent<kWithoutReadBarrier>();
    ObjPtr<mirror::Object> forwarded_ref =
        referent.IsNull() ? nullptr : collector_->IsMarked(referent.Ptr());
    // Either the referent was marked, and forwarded_ref is the correct return value, or it
    // was not, and forwarded_ref == null, which is again the correct return value.
    if (started_trace) {
      finish_trace(start_millis);
    }
    return forwarded_ref;
  }
  if (started_trace) {
    finish_trace(start_millis);
  }
  return reference->GetReferent();
}

// Forward SoftReferences. Can be done before we disable Reference access. Only
// invoked if we are not clearing SoftReferences.
uint32_t ReferenceProcessor::ForwardSoftReferences(TimingLogger* timings) {
  TimingLogger::ScopedTiming split(
      concurrent_ ? "ForwardSoftReferences" : "(Paused)ForwardSoftReferences", timings);
  // We used to argue that we should be smarter about doing this conditionally, but it's unclear
  // that's actually better than the more predictable strategy of basically only clearing
  // SoftReferences just before we would otherwise run out of memory.
  uint32_t non_null_refs = soft_reference_queue_.ForwardSoftReferences(collector_);
  if (ATraceEnabled()) {
    static constexpr size_t kBufSize = 80;
    char buf[kBufSize];
    snprintf(buf, kBufSize, "Marking for %" PRIu32 " SoftReferences", non_null_refs);
    ATraceBegin(buf);
    collector_->ProcessMarkStack();
    ATraceEnd();
  } else {
    collector_->ProcessMarkStack();
  }
  return non_null_refs;
}

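// Record the collector and configuration for the upcoming reference-processing pass;
// GetReferent() consults this state (together with rp_state_) under reference_processor_lock_.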
void ReferenceProcessor::Setup(Thread* self,
                               collector::GarbageCollector* collector,
                               bool concurrent,
                               bool clear_soft_references) {
  DCHECK(collector != nullptr);
  MutexLock mu(self, *Locks::reference_processor_lock_);
  collector_ = collector;
  rp_state_ = RpState::kStarting;
  concurrent_ = concurrent;
  clear_soft_references_ = clear_soft_references;
}

// Process reference class instances and schedule finalizations.
// We advance rp_state_ to signal partial completion for the benefit of GetReferent.
void ReferenceProcessor::ProcessReferences(Thread* self, TimingLogger* timings) {
  TimingLogger::ScopedTiming t(concurrent_ ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  if (!clear_soft_references_) {
    // Forward any additional SoftReferences we discovered late, now that reference access has been
    // inhibited.
    while (!soft_reference_queue_.IsEmpty()) {
      ForwardSoftReferences(timings);
    }
  }
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    if (!gUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent_) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent_ == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent_);
    }
    DCHECK(rp_state_ == RpState::kStarting);
    rp_state_ = RpState::kInitMarkingDone;
    condition_.Broadcast(self);
  }
  if (kIsDebugBuild && collector_->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Clear all remaining soft and weak references with white referents.
  // This misses references only reachable through finalizers.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_);
  // Defer PhantomReference processing until we've finished marking through finalizers.
  {
    // TODO: Capture mark state of some system weaks here. If the referent was marked here,
    // then it is now safe to return, since it can only refer to marked objects. If it becomes
    // marked below, that is no longer guaranteed.
    MutexLock mu(self, *Locks::reference_processor_lock_);
    rp_state_ = RpState::kInitClearingDone;
    // At this point, all mutator-accessible data is marked (black). Objects enqueued for
    // finalization will only be made available to the mutator via CollectClearedReferences after
    // we're fully done marking. Soft and WeakReferences accessible to the mutator have been
    // processed and refer only to black objects. Thus there is no danger of the mutator getting
    // access to non-black objects. Weak reference processing is still nominally suspended, but
    // many kinds of references, including all java.lang.ref ones, are handled normally from
    // here on. See GetReferent().
  }
  {
    TimingLogger::ScopedTiming t2(
        concurrent_ ? "EnqueueFinalizerReferences" : "(Paused)EnqueueFinalizerReferences", timings);
    // Preserve all white objects with finalize methods and schedule them for finalization.
    FinalizerStats finalizer_stats =
        finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector_);
    if (ATraceEnabled()) {
      static constexpr size_t kBufSize = 80;
      char buf[kBufSize];
      snprintf(buf, kBufSize, "Marking from %" PRIu32 " / %" PRIu32 " finalizers",
               finalizer_stats.num_enqueued_, finalizer_stats.num_refs_);
      ATraceBegin(buf);
      collector_->ProcessMarkStack();
      ATraceEnd();
    } else {
      collector_->ProcessMarkStack();
    }
  }

  // Process all soft and weak references with white referents, where the references are reachable
  // only from finalizers. It is unclear that there is any way to do this without slightly
  // violating some language spec. We choose to apply normal Reference processing rules for these.
  // This exposes the following issues:
  // 1) In the case of an unmarked referent, we may end up enqueuing an "unreachable" reference.
  //    This appears unavoidable, since we need to clear the reference for safety, unless we
  //    mark the referent and undo finalization decisions for objects we encounter during marking.
  //    (Some versions of the RI seem to do something along these lines.)
  //    Or we could clear the reference without enqueuing it, which also seems strange and
  //    unhelpful.
  // 2) In the case of a marked referent, we will preserve a reference to objects that may have
  //    been enqueued for finalization. Again fixing this would seem to involve at least undoing
  //    previous finalization / reference clearing decisions. (This would also mean that an object
  //    containing both a strong and a WeakReference to the same referent could see the
  //    WeakReference cleared.)
  // The treatment in (2) is potentially quite dangerous, since Reference.get() can e.g. return a
  // finalized object containing pointers to native objects that have already been deallocated.
  // But it can be argued that this is just an instance of the broader rule that it is not safe
  // for finalizers to access otherwise inaccessible finalizable objects.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_,
                                             /*report_cleared=*/ true);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_,
                                             /*report_cleared=*/ true);

  // Clear all phantom references with white referents. It's fine to do this just once here.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector_);

  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());

  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only for concurrent
    // GCs could result in a stale is_marked_callback_ being called before reference processing
    // starts, since there is a small window of time where slow_path_enabled_ is set but the
    // callback isn't yet installed.
    if (!gUseReadBarrier && concurrent_) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and skip reference processing entirely, so
      // that there is nothing to roll back later. do_atomic_update needs to be true because this
      // happens outside of the reference processing phase.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

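// Visit (and possibly update) the root of the cleared-references list, e.g. for a moving GC.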
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

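// Task that passes a chain of cleared references to java.lang.ref.ReferenceQueue.add() and then
// drops the global reference that kept the chain alive.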
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  void Run(Thread* thread) override {
    ScopedObjectAccess soa(thread);
    WellKnownClasses::java_lang_ref_ReferenceQueue_add->InvokeStatic<'V', 'L'>(
        thread, soa.Decode<mirror::Object>(cleared_references_));
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

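// Package the references cleared by this GC into a task that enqueues them on their managed
// ReferenceQueue. By default the caller runs the returned task itself; if kAsyncReferenceQueueAdd
// is set, the work is instead posted to the heap's TaskProcessor.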
SelfDeletingTask* ReferenceProcessor::CollectClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // By default we don't actually need to do anything. Just return this no-op task to avoid having
  // to put in ifs.
  std::unique_ptr<SelfDeletingTask> result(new FunctionTask([](Thread*) {}));
  // When a runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->GetVm()->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        result.reset(new ClearedReferenceTask(cleared_references));
      }
    }
    cleared_references_.Clear();
  }
  return result.release();
}

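// Clear the referent field once any in-flight reference processing has finished, so we cannot
// race with the GC's referent mark checks.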
void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
  // This also handles the race where the referent gets cleared after a null check but before
  // IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!gUseReadBarrier && SlowPathEnabled()) ||
         (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

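// If |reference| has not yet been enqueued by the GC, link it to itself via pendingNext (the
// sentinel for a circular list) and return true; return false if the GC already claimed it.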
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
  // phase. Since we are holding the reference processor lock, it guarantees that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we hold the finalizer_reference_queue_ lock it also prevents this
  // race.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art