/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "mem_map.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

/*
 * Maintain a table of indirect references. Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack. For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns. Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space. We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */

/*
 * Indirect reference definition. This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global). Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used. (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table. This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get. It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1. A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;
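
/*
 * Illustrative sketch (not prescribed by this header) of unpacking the
 * fields of an IndirectRef under the bit layout described above; "iref"
 * is a placeholder. The masks and shifts mirror GetIndirectRefKind(),
 * ExtractIndex() and ToIndirectRef() below.
 *
 *   uintptr_t bits = reinterpret_cast<uintptr_t>(iref);
 *   IndirectRefKind kind = static_cast<IndirectRefKind>(bits & 0x03);
 *   uint32_t index = (bits >> 2) & 0xffff;  // 16-bit table index
 *   uint32_t serial = bits >> 20;           // serial chunk; see ToIndirectRef()
 */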

// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast<mirror::Object*>(0xdead4321);
static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Object*>(0xdead1234);

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,  // <<local reference>>
  kGlobal = 2,  // <<global reference>>
  kWeakGlobal = 3  // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}
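
/*
 * Because the enumerators deliberately match jni.h's jobjectRefType, a kind
 * can be converted with a plain cast (sketch; kHandleScopeOrInvalid maps to
 * JNIInvalidRefType):
 *
 *   jobjectRefType type = static_cast<jobjectRefType>(GetIndirectRefKind(iref));
 */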

/*
 * Extended debugging structure. We keep a parallel array of these, one
 * per slot in the table.
 */
static const size_t kIRTPrevCount = 4;
struct IndirectRefSlot {
  uint32_t serial;
  const mirror::Object* previous[kIRTPrevCount];
};

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry). For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move. If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes". We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed. Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment". The top is managed internally, and
 * the bottom is passed in as a function argument. When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom. When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack. Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot. Instead of getting a table and doing a lookup,
 * the lookup can be done instantly. Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance. A large circular buffer might
 * reduce the amortized cost of adding global references.
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
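
/*
 * Sketch of the cookie protocol described above, roughly as the JNI glue
 * might use it ("table" is a placeholder IndirectReferenceTable, declared
 * below):
 *
 *   uint32_t cookie = table.GetSegmentState();  // push: capture top + holes
 *   // ... callee creates locals via table.Add(cookie, obj) ...
 *   table.SetSegmentState(cookie);              // pop: discard the segment
 *
 * Packing both fields into one uint32_t is what lets a pop restore the
 * hole count without re-scanning the table.
 */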

class IrtIterator {
 public:
  explicit IrtIterator(GcRoot<mirror::Object>* table, size_t i, size_t capacity)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
    SkipNullsAndTombstones();
  }

  IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ++i_;
    SkipNullsAndTombstones();
    return *this;
  }

  mirror::Object** operator*() {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].AddressWithoutBarrier();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  void SkipNullsAndTombstones() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We skip NULLs and tombstones. Clients don't want to see implementation details.
    while (i_ < capacity_ &&
           (table_[i_].IsNull() ||
            table_[i_].Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal)) {
      ++i_;
    }
  }

  GcRoot<mirror::Object>* const table_;
  size_t i_;
  size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

class IndirectReferenceTable {
 public:
  IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);

  ~IndirectReferenceTable();

  /*
   * Add a new entry. "obj" must be a valid non-NULL object reference.
   *
   * Returns NULL if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                  IndirectRef iref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything. This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);
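
  /*
   * Typical Add/Get/Remove sequence (sketch; "table" and "obj" are
   * placeholders): roughly what creating and deleting a JNI local
   * reference amounts to.
   *
   *   uint32_t cookie = table.GetSegmentState();
   *   IndirectRef ref = table.Add(cookie, obj);  // NULL if the table is full
   *   mirror::Object* o = table.Get(ref);        // kInvalidIndirectRefObject if stale
   *   bool ok = table.Remove(cookie, ref);       // false if outside the segment
   */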

  void AssertEmpty();

  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table. This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }
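
  /*
   * Example (sketch) of walking the live entries; the iterator skips nulls
   * and cleared weak globals, so only real roots are visited:
   *
   *   for (IrtIterator it = table.begin(); it != table.end(); ++it) {
   *     mirror::Object** root = *it;  // no read barrier; root visiting only
   *     // ... process root ...
   *   }
   */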

  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset() {
    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
  }

 private:
  /*
   * Extract the table index from an indirect reference.
   */
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = slot_data_[tableIndex].serial;
    uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  /*
   * Update extended debug info when an entry is added.
   *
   * We advance the serial number, invalidating any outstanding references to
   * this slot.
   */
  void UpdateSlotAdd(const mirror::Object* obj, int slot) {
    if (slot_data_ != NULL) {
      IndirectRefSlot* pSlot = &slot_data_[slot];
      pSlot->serial++;
      pSlot->previous[pSlot->serial % kIRTPrevCount] = obj;
    }
  }

  // Abort if check_jni is not enabled.
  static void AbortIfNoCheckJNI();

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const;
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Mem map where we store the extended debugging info.
  std::unique_ptr<MemMap> slot_mem_map_;
  // Bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get() that has a read barrier.
  GcRoot<mirror::Object>* table_;
  /* bit mask, ORed into all irefs */
  IndirectRefKind kind_;
  /* extended debugging info */
  IndirectRefSlot* slot_data_;
  /* #of entries we have space for */
  size_t alloc_entries_;
  /* max #of entries allowed */
  size_t max_entries_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_