/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_queue.h"

#include "accounting/card_table-inl.h"
#include "collector/concurrent_copying.h"
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "object_callbacks.h"

namespace art {
namespace gc {

ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}

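// Enqueue |ref| only if it has not already been enqueued or processed. Holding lock_ makes the
// IsUnprocessed() check and the enqueue a single atomic step with respect to other threads.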
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  MutexLock mu(self, *lock_);
  if (ref->IsUnprocessed()) {
    EnqueueReference(ref);
  }
}

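// Link |ref| into the circular singly-linked list threaded through Reference.pendingNext.
// New entries are spliced in directly after list_, so after enqueuing A and then B the links
// are: list_ == A, A.pendingNext == B, B.pendingNext == A.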
void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // One-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, so everything that has been inserted must already be at
    // least gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}

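// Unlink and return the entry at list_->pendingNext. Every call must be paired with a call to
// DisableReadBarrierForReference() on the returned reference (see below).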
ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences
  // which is single threaded.
  if (list_ == ref) {
    list_ = nullptr;
  } else {
    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}

// This must be called whenever DequeuePendingReference is called.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to white.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::WhiteState());
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::WhiteState()) << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}

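// Debugging aid: print every reference in the cyclic list, including the zombie field for
// finalizer references.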
void ReferenceQueue::Dump(std::ostream& os) const {
  ObjPtr<mirror::Reference> cur = list_;
  os << "Reference starting at list_=" << list_ << "\n";
  if (cur == nullptr) {
    return;
  }
  do {
    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    os << "Reference= " << cur << " PendingNext=" << pending_next;
    if (cur->IsFinalizerReferenceInstance()) {
      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    }
    os << "\n";
    cur = pending_next;
  } while (cur != list_);
}

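// Count the references by walking the cyclic list exactly once.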
size_t ReferenceQueue::GetLength() const {
  size_t count = 0;
  ObjPtr<mirror::Reference> cur = list_;
  if (cur != nullptr) {
    do {
      ++count;
      cur = cur->GetPendingNext();
    } while (cur != list_);
  }
  return count;
}

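// Drain this queue: clear the referent of every reference whose referent is still white
// (unmarked) and move those references onto |cleared_references|.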
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}

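// Drain this queue of finalizer references: for each reference whose referent is still white,
// mark the referent (keeping it alive for finalization), stash the forwarded referent in the
// zombie field, clear the referent, and move the reference onto |cleared_references|.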
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference());
  }
}

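// Mark (and thereby forward) the referent of every soft reference in the queue so the
// referents are treated as live for this collection.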
void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
  if (UNLIKELY(IsEmpty())) {
    return;
  }
  ObjPtr<mirror::Reference> const head = list_;
  ObjPtr<mirror::Reference> ref = head;
  do {
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    if (referent_addr->AsMirrorPtr() != nullptr) {
      // do_atomic_update is false because mutators can't access the referent due to the weak ref
      // access blocking.
      visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
    }
    ref = ref->GetPendingNext();
  } while (LIKELY(ref != head));
}

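// Treat list_ as a GC root: if the collector has moved the head object, update list_ to the
// object's new address.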
void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
  if (list_ != nullptr) {
    list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
  }
}

}  // namespace gc
}  // namespace art