/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <limits>
#include <string>

#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;

// Maintain a table of indirect references.  Used for local/global JNI references.
//
// The table contains object references, where the strong (local/global) references are part of the
// GC root set (but not the weak global references). When an object is added we return an
// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
// to be very fast.
//
// To be efficient for JNI local variable storage, we need to provide operations that allow us to
// operate on segments of the table, where segments are pushed and popped as if on a stack. For
// example, deletion of an entry should only succeed if it appears in the current segment, and we
// want to be able to strip off the current segment quickly when a method returns. Additions to the
// table must be made in the current segment even if space is available in an earlier area.
//
// A new segment is created when we call into native code from interpreted code, or when we handle
// the JNI PushLocalFrame function.
//
// The GC must be able to scan the entire table quickly.
//
// In summary, these must be very fast:
//  - adding or removing a segment
//  - adding references to a new segment
//  - converting an indirect reference back to an Object
// These can be a little slower, but must still be pretty quick:
//  - adding references to a "mature" segment
//  - removing individual references
//  - scanning the entire table straight through
//
// If there's more than one segment, we don't guarantee that the table will fill completely before
// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
//
// Only SynchronizedGet is synchronized.

// Indirect reference definition.  This must be interchangeable with JNI's jobject, and it's
// convenient to let null be null, so we use void*.
//
// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
// global). We also reserve some bits to be used to detect stale indirect references: we put a
// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
// lookup iref1. A pattern based on object bits will miss this.
typedef void* IndirectRef;
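
// Illustrative layout of the bits inside an IndirectRef, reconstructed from the encoding helpers
// in IndirectReferenceTable below. The serial field is kSerialBits wide, which depends on
// kIRTPrevCount, so the field boundaries shown here are a sketch, not a fixed ABI:
//
//   +-------------------------------+---------------+----------+
//   |          table index          |    serial     |   kind   |
//   +-------------------------------+---------------+----------+
//    <-- remaining high bits ------> <-kSerialBits-> <-2 bits->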

// Indirect reference kind, used as the two low bits of IndirectRef.
//
// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,           // <<stack indirect reference table or invalid reference>>
  kLocal                = 1,           // <<local reference>>
  kGlobal               = 2,           // <<global reference>>
  kWeakGlobal           = 3,           // <<weak global reference>>
  kLastKind             = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);
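
// Sketch of that correspondence (jni.h is not included here, so this is illustrative rather than
// compiled): jobjectRefType defines JNIInvalidRefType == 0, JNILocalRefType == 1,
// JNIGlobalRefType == 2 and JNIWeakGlobalRefType == 3, so checks like the following would hold:
//   static_assert(static_cast<int>(kLocal) == static_cast<int>(JNILocalRefType), "mismatch");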

// Table definition.
//
// For the global reference table, the expected common operations are adding a new entry and
// removing a recently-added entry (usually the most-recently-added entry).  For JNI local
// references, the common operations are adding a new entry and removing an entire table segment.
//
// If we delete entries from the middle of the list, we will be left with "holes".  We track the
// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
// or go slot-hunting.
//
// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
// deletion of an entry may reduce "top_index" by more than one.
//
// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
// "segment". The top is managed internally, and the bottom is passed in as a function argument.
// When we call a native method or push a local frame, the current top index gets pushed on, and
// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
// index, and the value stored in the previous frame becomes the new bottom.
//
// Holes are cached locally per segment. Otherwise we'd have to pass around both the bottom index
// and the number of holes, which would restrict us to 16 bits for the top index. The hole count
// is cached within the table. To avoid extra code in generated JNI transitions, which implicitly
// form segments, the code for adding and removing references needs to detect segment changes on
// its own. Helper fields are used for this detection.
//
// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
// determining the type and deleting the reference are more expensive because the table must be
// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
// the table when expanding it (so realloc() is out), and tricks like serial number checking to
// detect stale references aren't possible (though we may be able to get similar benefits with
// other approaches).
//
// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
// delete; it would have to be invalidated after a segment pop, so it might be worth using only
// for JNI globals.
//
// TODO: may want completely different add/remove algorithms for global and local refs to improve
// performance.  A large circular buffer might reduce the amortized cost of adding global
// references.
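
// Worked example of the hole bookkeeping described above (entries and indices are illustrative):
//   add A, B, C          -> top_index == 3, 0 holes
//   remove B             -> top_index == 3, 1 hole (slot 1)
//   remove C (top-most)  -> top_index == 1, 0 holes (the hole below the removed
//                           top entry is stripped along with it)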

// The state of the current segment. We only store the index. Splitting it for index and hole
// count restricts the range too much.
struct IRTSegmentState {
  uint32_t top_index;
};

// Use as initial value for "cookie", and when table has only one segment.
static constexpr IRTSegmentState kIRTFirstSegment = { 0 };

// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// Each entry contains multiple reference slots but only one active one; this helps us detect
// use-after-free errors, since the serial number stored in a stale indirect ref won't match the
// slot's current serial.
static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
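
// Conceptual sequence showing the detection (simplified; assumes the second add reuses the same
// slot):
//   iref1 = Add(obj1)   // slot serial is s, and iref1 encodes s
//   Remove(iref1)
//   iref2 = Add(obj2)   // slot serial advances to s + 1, and iref2 encodes s + 1
//   Get(iref1)          // encoded serial s != slot serial s + 1 -> caught as stale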

class IrtEntry {
 public:
  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  const GcRoot<mirror::Object>* GetReference() const {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  uint32_t GetSerial() const {
    return serial_;
  }

  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");
static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
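
// Spelling out the asserts above: GcRoot<mirror::Object> holds a single compressed 32-bit
// reference, so sizeof(IrtEntry) == (1 + kIRTPrevCount) * 4, i.e. (1 + 7) * 4 == 32 bytes in
// debug builds and (1 + 3) * 4 == 16 bytes in release builds - both powers of two.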

class IrtIterator {
 public:
  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
  }

  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

class IndirectReferenceTable {
 public:
  enum class ResizableCapacity {
    kNo,
    kYes
  };

  // WARNING: Construction of the IndirectReferenceTable may fail.
  // error_msg must not be null. If error_msg is set by the constructor, then
  // construction has failed and the IndirectReferenceTable will be in an
  // invalid state. Use IsValid to check whether the object is in an invalid
  // state.
  IndirectReferenceTable(size_t max_count,
                         IndirectRefKind kind,
                         ResizableCapacity resizable,
                         std::string* error_msg);

  ~IndirectReferenceTable();

  /*
   * Checks whether construction of the IndirectReferenceTable succeeded.
   *
   * This object must only be used if IsValid() returns true. It is safe to
   * call IsValid from multiple threads without locking or other explicit
   * synchronization.
   */
  bool IsValid() const;

  // Add a new entry. "obj" must be a valid non-null object reference. This function will
  // abort if the table is full (max entries reached, or expansion failed).
  IndirectRef Add(IRTSegmentState previous_state, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given an IndirectRef in the table, return the Object it refers to.
  //
  // This function may abort under error conditions.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> SynchronizedGet(IndirectRef iref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  // Updates an existing indirect reference to point to a new object.
  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove an existing entry.
  //
  // If the entry is not between the current top index and the bottom index
  // specified by the cookie, we don't remove anything.  This is the behavior
  // required by JNI's DeleteLocalRef function.
  //
  // Returns "false" if nothing was removed.
  bool Remove(IRTSegmentState previous_state, IndirectRef iref);

  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the #of entries in the entire table.  This includes holes, and
  // so may be larger than the actual number of "live" entries.
  size_t Capacity() const {
    return segment_state_.top_index;
  }

  // Ensure that at least free_capacity elements are available, or return false.
  bool EnsureFreeCapacity(size_t free_capacity, std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // See the implementation of EnsureFreeCapacity. This reports only how much space is trivially
  // free, without recovering holes, so it is a conservative estimate.
  size_t FreeCapacity() REQUIRES_SHARED(Locks::mutator_lock_);

  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
      REQUIRES_SHARED(Locks::mutator_lock_);

  IRTSegmentState GetSegmentState() const {
    return segment_state_;
  }

  void SetSegmentState(IRTSegmentState new_state);

  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
    //       jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
    //       is not pointer-size-safe.
    return Offset(0);
  }

  // Release pages past the end of the table that may have previously held references.
  void Trim() REQUIRES_SHARED(Locks::mutator_lock_);

  // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
  ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
    return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
  }

 private:
  static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount);
  static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1;

  static constexpr size_t kKindBits = MinimumBitsToStore(
      static_cast<uint32_t>(IndirectRefKind::kLastKind));
  static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;

  static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
    static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
    DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits);
    return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits);
  }
  static constexpr uint32_t DecodeIndex(uintptr_t uref) {
    return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits);
  }

  static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
    return static_cast<uintptr_t>(kind);
  }
  static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
    return static_cast<IndirectRefKind>(uref & kKindMask);
  }

  static constexpr uintptr_t EncodeSerial(uint32_t serial) {
    DCHECK_LE(MinimumBitsToStore(serial), kSerialBits);
    return serial << kKindBits;
  }
  static constexpr uint32_t DecodeSerial(uintptr_t uref) {
    return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
  }

  constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
    DCHECK_LT(table_index, max_entries_);
    return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_);
  }
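
  // Worked example (release build, so kIRTPrevCount == 3 and kSerialBits == kKindBits == 2;
  // values are illustrative): EncodeIndirectRef(5, 1) with kind_ == kLocal yields
  // (5 << 4) | (1 << 2) | 1 == 0x55; decoding recovers index 0x55 >> 4 == 5,
  // serial (0x55 >> 2) & 3 == 1, and kind 0x55 & 3 == kLocal.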

  static void ConstexprChecks();

  // Extract the table index from an indirect reference.
  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
  }

  IndirectRef ToIndirectRef(uint32_t table_index) const {
    DCHECK_LT(table_index, max_entries_);
    uint32_t serial = table_[table_index].GetSerial();
    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
  }

  // Resize the backing table. Currently the new size must be larger than the current size.
  bool Resize(size_t new_size, std::string* error_msg);

  void RecoverHoles(IRTSegmentState from);

  // Abort if check_jni is not enabled. Otherwise, just log as an error.
  static void AbortIfNoCheckJNI(const std::string& msg);

  // Extra debugging checks.
  bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
  bool CheckEntry(const char*, IndirectRef, uint32_t) const;

  // Semi-public: read/written by JNI down calls.
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get(), which has a read barrier.
  IrtEntry* table_;
  // Bit mask, ORed into all irefs.
  const IndirectRefKind kind_;

  // Max number of entries allowed (modulo resizing).
  size_t max_entries_;

  // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
  // file.
  // TODO: Consider other data structures for compact tables, e.g., free lists.
  size_t current_num_holes_;
  IRTSegmentState last_known_previous_state_;

  // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
  // responsibility to ensure thread-safety.
  ResizableCapacity resizable_;
};
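
// Illustrative only: a sketch of the "cookie" protocol described at the top of this file, written
// against the public API above. The helper name is hypothetical and not part of the runtime;
// real JNI transitions perform these steps in generated code.
inline void ExampleIrtSegmentRoundTrip(IndirectReferenceTable& irt, ObjPtr<mirror::Object> obj)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Push: the current top index becomes the bottom of a new segment.
  const IRTSegmentState cookie = irt.GetSegmentState();
  // Adds always land in the current (new) segment.
  IndirectRef ref = irt.Add(cookie, obj);
  // Removal succeeds only for entries within the current segment.
  irt.Remove(cookie, ref);
  // Pop: strip off the whole segment, restoring the saved state.
  irt.SetSegmentState(cookie);
}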

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_