/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
#define ART_RUNTIME_GC_REFERENCE_QUEUE_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "base/atomic.h"
#include "base/locks.h"
#include "base/timing_logger.h"
#include "jni.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "runtime_globals.h"
#include "thread_pool.h"

namespace art {

class Mutex;

namespace mirror {
class Reference;
}  // namespace mirror

class IsMarkedVisitor;
class MarkObjectVisitor;

namespace gc {

namespace collector {
class GarbageCollector;
}  // namespace collector

class Heap;

struct FinalizerStats {
  FinalizerStats(size_t num_refs, size_t num_enqueued)
      : num_refs_(num_refs), num_enqueued_(num_enqueued) {}
  const uint32_t num_refs_;
  const uint32_t num_enqueued_;
};

// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
// objects.
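//
// As an illustrative sketch only (Node and pending_next below are stand-ins for
// mirror::Reference and its pendingNext field, not the real runtime types), insertion into such
// a circular, singly-linked list looks roughly like:
//
//   struct Node {
//     Node* pending_next = nullptr;
//   };
//
//   // Link |node| into the cycle that |*list| belongs to. An empty list is represented by
//   // *list == nullptr, and a single element links to itself.
//   void Insert(Node** list, Node* node) {
//     if (*list == nullptr) {
//       node->pending_next = node;  // One-element cycle.
//       *list = node;
//     } else {
//       node->pending_next = (*list)->pending_next;
//       (*list)->pending_next = node;
//     }
//   }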
class ReferenceQueue {
 public:
  explicit ReferenceQueue(Mutex* lock);

  // Enqueue a reference if it is unprocessed. Thread safe to call from multiple threads since it
  // uses a lock to avoid a race between checking for the reference's presence and adding it.
  void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
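
  // A rough sketch of the check-then-insert idea behind the method above (illustration only; the
  // real implementation lives in reference_queue.cc, and IsUnprocessed() is assumed here rather
  // than quoted from it):
  //
  //   MutexLock mu(self, *lock_);   // Serialize concurrent enqueuers.
  //   if (ref->IsUnprocessed()) {   // Check and insert under the same lock,
  //     EnqueueReference(ref);      // so no reference is enqueued twice.
  //   }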

  // Enqueue a reference. The reference must be unprocessed.
  // Not thread safe, used when mutators are paused to minimize lock overhead.
  void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);

  // Dequeue the first pending reference from the queue and return it.
  // Call DisableReadBarrierForReference for the reference that is returned from this function.
  ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);

  // If applicable, disable the read barrier for the reference after its referent is handled (see
  // ConcurrentCopying::ProcessMarkStackRef). This must be called for every reference dequeued
  // from the pending queue via DequeuePendingReference.
  void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
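
  // The expected pairing of the two calls above, as a sketch (not an actual call site in the
  // runtime):
  //
  //   while (!queue->IsEmpty()) {
  //     ObjPtr<mirror::Reference> ref = queue->DequeuePendingReference();
  //     // ... process ref's referent ...
  //     queue->DisableReadBarrierForReference(ref);
  //   }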

  // Enqueues finalizer references with white referents. Each white referent is blackened and
  // moved to the zombie field, and the referent field is cleared.
  FinalizerStats EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                            collector::GarbageCollector* collector)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Walks the reference list, marking and dequeuing any references subject to the reference
  // clearing policy. References with a black referent are removed from the list. References
  // with white referents biased toward saving are blackened and also removed from the list.
  // Returns the number of non-null soft references. May be called concurrently with
  // AtomicEnqueueIfNotEnqueued().
  uint32_t ForwardSoftReferences(MarkObjectVisitor* visitor)
      REQUIRES(!*lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Unlink the reference list, clearing any reference objects with white referents. Cleared
  // references registered to a reference queue are scheduled for appending by the heap worker
  // thread.
  void ClearWhiteReferences(ReferenceQueue* cleared_references,
                            collector::GarbageCollector* collector,
                            bool report_cleared = false)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsEmpty() const {
    return list_ == nullptr;
  }

  // Clear this queue. Only safe after handing off the contents elsewhere for further processing.
  void Clear() {
    list_ = nullptr;
  }

  mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
    return list_;
  }

  // Visits list_, currently only used for the mark compact GC.
  void UpdateRoots(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Lock used for parallel GC reference enqueuing. It allows multiple threads to call
  // AtomicEnqueueIfNotEnqueued simultaneously.
  Mutex* const lock_;
  // The actual reference list. Only a root for the mark compact GC since it
  // will be null during root marking for other GC types. Not an ObjPtr since it
  // is accessed from multiple threads. Points to a singly-linked circular list
  // using the pendingNext field.
  mirror::Reference* list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_REFERENCE_QUEUE_H_