/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef DALVIK_INDIRECTREFTABLE_H_
#define DALVIK_INDIRECTREFTABLE_H_

/*
 * Maintain a table of indirect references.  Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect refs are performed on JNI method calls
 * in and out of the VM, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack.  For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns.  Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space.  We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * None of the table functions are synchronized.
 */
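
/*
 * Illustrative sketch of the segment discipline, using the IndirectRefTable
 * API declared below ("table" and "obj" are placeholders, and the exact
 * call sequence is an assumption for illustration, not VM code):
 *
 *   u4 cookie = table.segmentState.all;         // entering native code: the
 *                                               // current state is the cookie
 *   IndirectRef iref = table.add(cookie, obj);  // lands in the new segment
 *   table.remove(cookie, iref);                 // only succeeds for entries
 *                                               // in the current segment
 *   table.segmentState.all = cookie;            // returning: strips the whole
 *                                               // segment in O(1)
 */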

/*
 * Indirect reference definition.  This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global).  Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used.  (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table.  This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get.  It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1.  A pattern based on object bits will miss this.
 *
 * For now, we use a serial number.
 */
typedef void* IndirectRef;
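
/*
 * Layout of the 32 bits, as implemented by toIndirectRef(), extractIndex()
 * and extractSerial() below:
 *
 *   bits 31-20: serial number (12 bits; wraps via nextSerial())
 *   bits 19-18: unused
 *   bits 17-2:  table index (16 bits, hence the 64K cap)
 *   bits  1-0:  IndirectRefKind
 *
 * Worked example: index 5, serial 3, kind local (1) encodes as
 * (3 << 20) | (5 << 2) | 1 == 0x00300015.
 */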

/* magic failure value; must not pass dvmIsHeapAddress() */
#define kInvalidIndirectRefObject reinterpret_cast<Object*>(0xdead4321)

#define kClearedJniWeakGlobal reinterpret_cast<Object*>(0xdead1234)

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
    kIndirectKindInvalid    = 0,
    kIndirectKindLocal      = 1,
    kIndirectKindGlobal     = 2,
    kIndirectKindWeakGlobal = 3
};
const char* indirectRefKindToString(IndirectRefKind kind);

/*
 * Determine what kind of indirect reference this is.
 */
INLINE IndirectRefKind indirectRefKind(IndirectRef iref)
{
    return (IndirectRefKind)((u4) iref & 0x03);
}

/*
 * Information we store for each slot in the reference table.
 */
struct IndirectRefSlot {
    Object* obj;        /* object pointer itself, NULL if the slot is unused */
    u4      serial;     /* slot serial number */
};

/* use as initial value for "cookie", and when table has only one segment */
#define IRT_FIRST_SEGMENT   0

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry).  For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move.  If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes".  We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed.  Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment".  The top is managed internally, and
 * the bottom is passed in as a function argument (the VM keeps it in a
 * slot in the interpreted stack frame).  When we call a native method or
 * push a local frame, the current top index gets pushed on, and serves
 * as the new bottom.  When we pop a frame off, the value from the stack
 * becomes the new top index, and the value stored in the previous frame
 * becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack.  Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * We need to minimize method call/return overhead.  If we store the
 * "cookie" externally, on the interpreted call stack, the VM can handle
 * pushes and pops with a single 4-byte load and store.  (We could also
 * store it internally in a public structure, but the local JNI refs are
 * logically tied to interpreted stack frames anyway.)
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot.  Instead of getting a table and doing a lookup,
 * the lookup can be done instantly.  Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance.  A large circular buffer might
 * reduce the amortized cost of adding global references.
 *
 * TODO: if we can guarantee that the underlying storage doesn't move,
 * e.g. by using oversized mmap regions to handle expanding tables, we may
 * be able to avoid having to synchronize lookups.  Might make sense to
 * add a "synchronized lookup" call that takes the mutex as an argument,
 * and either locks or doesn't lock based on internal details.
 */
union IRTSegmentState {
    u4          all;
    struct {
        u4      topIndex:16;            /* index of first unused entry */
        u4      numHoles:16;            /* #of holes in entire table */
    } parts;
};
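
/*
 * Minimal sketch of unpacking the combined value (illustrative; with the
 * 64K cap both fields fit in 16 bits, so a segment push or pop is a single
 * u4 load/store):
 *
 *   IRTSegmentState state;
 *   state.all = cookie;                 // restore the saved state on pop
 *   u4 top   = state.parts.topIndex;    // index of first unused entry
 *   u4 holes = state.parts.numHoles;    // holes below that point
 */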

class iref_iterator {
public:
    explicit iref_iterator(IndirectRefSlot* table, size_t i, size_t capacity) :
            table_(table), i_(i), capacity_(capacity) {
        skipNullsAndTombstones();
    }

    iref_iterator& operator++() {
        ++i_;
        skipNullsAndTombstones();
        return *this;
    }

    Object** operator*() {
        return &table_[i_].obj;
    }

    bool equals(const iref_iterator& rhs) const {
        return (i_ == rhs.i_ && table_ == rhs.table_);
    }

private:
    void skipNullsAndTombstones() {
        // We skip NULLs and tombstones. Clients don't want to see implementation details.
        while (i_ < capacity_ && (table_[i_].obj == NULL
                || table_[i_].obj == kClearedJniWeakGlobal)) {
            ++i_;
        }
    }

    IndirectRefSlot* table_;
    size_t i_;
    size_t capacity_;
};

inline bool operator!=(const iref_iterator& lhs, const iref_iterator& rhs) {
    return !lhs.equals(rhs);
}
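
/*
 * Illustrative sketch: a straight-through scan, e.g. for GC root marking
 * ("visitRoot" is a hypothetical callback, and "table" an IndirectRefTable
 * as declared below).  NULL slots and cleared weak globals are skipped by
 * the iterator itself.
 *
 *   for (IndirectRefTable::iterator it = table.begin(); it != table.end(); ++it) {
 *       Object** entry = *it;
 *       visitRoot(entry);
 *   }
 */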

struct IndirectRefTable {
public:
    typedef iref_iterator iterator;

    /* semi-public - read/write by interpreter in native call handler */
    IRTSegmentState segmentState;

    /*
     * private:
     *
     * TODO: we can't make these private as long as the interpreter
     * uses offsetof, since private member data makes us non-POD.
     */
    /* bottom of the stack */
    IndirectRefSlot* table_;
    /* bit mask, ORed into all irefs */
    IndirectRefKind kind_;
    /* #of entries we have space for */
    size_t          alloc_entries_;
    /* max #of entries allowed */
    size_t          max_entries_;

    // TODO: want hole-filling stats (#of holes filled, total entries scanned)
    //       for performance evaluation.

    /*
     * Add a new entry.  "obj" must be a valid non-NULL object reference
     * (though it's okay if it's not fully-formed, e.g. the result from
     * dvmMalloc doesn't have obj->clazz set).
     *
     * Returns NULL if the table is full (max entries reached, or alloc
     * failed during expansion).
     */
    IndirectRef add(u4 cookie, Object* obj);

    /*
     * Given an IndirectRef in the table, return the Object it refers to.
     *
     * Returns kInvalidIndirectRefObject if iref is invalid.
     */
    Object* get(IndirectRef iref) const;

    /*
     * Returns true if the table contains a reference to this object.
     */
    bool contains(const Object* obj) const;

    /*
     * Remove an existing entry.
     *
     * If the entry is not between the current top index and the bottom index
     * specified by the cookie, we don't remove anything.  This is the behavior
     * required by JNI's DeleteLocalRef function.
     *
     * Returns "false" if nothing was removed.
     */
    bool remove(u4 cookie, IndirectRef iref);

    /*
     * Initialize an IndirectRefTable.
     *
     * If "initialCount" != "maxCount", the table will expand as required.
     *
     * "kind" should be Local or Global.  The Global table may also hold
     * WeakGlobal refs.
     *
     * Returns "false" if table allocation fails.
     */
    bool init(size_t initialCount, size_t maxCount, IndirectRefKind kind);

    /*
     * Clear out the contents, freeing allocated storage.
     *
     * You must call init() before you can re-use this table.
     *
     * TODO: this should be a destructor.
     */
    void destroy();

    /*
     * Dump the contents of a reference table to the log file.
     *
     * The caller should lock any external sync before calling.
     *
     * TODO: we should name the table in a constructor and remove
     * the argument here.
     */
    void dump(const char* descr) const;

    /*
     * Return the #of entries in the entire table.  This includes holes, and
     * so may be larger than the actual number of "live" entries.
     */
    size_t capacity() const {
        return segmentState.parts.topIndex;
    }

    iterator begin() {
        return iterator(table_, 0, capacity());
    }

    iterator end() {
        return iterator(table_, capacity(), capacity());
    }

private:
    static inline u4 extractIndex(IndirectRef iref) {
        u4 uref = (u4) iref;
        return (uref >> 2) & 0xffff;
    }

    static inline u4 extractSerial(IndirectRef iref) {
        u4 uref = (u4) iref;
        return uref >> 20;
    }

    static inline u4 nextSerial(u4 serial) {
        return (serial + 1) & 0xfff;
    }

    static inline IndirectRef toIndirectRef(u4 index, u4 serial, IndirectRefKind kind) {
        assert(index < 65536);
        return reinterpret_cast<IndirectRef>((serial << 20) | (index << 2) | kind);
    }
};
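
/*
 * End-to-end sketch of the table lifecycle (illustrative; error handling
 * elided, and "obj" stands in for a valid Object*):
 *
 *   IndirectRefTable irt;
 *   irt.init(16, 64, kIndirectKindLocal);      // may fail; returns bool
 *   u4 cookie = IRT_FIRST_SEGMENT;             // single-segment case
 *   IndirectRef iref = irt.add(cookie, obj);   // NULL if the table is full
 *   Object* back = irt.get(iref);              // O(1) round trip
 *   irt.remove(cookie, iref);
 *   irt.destroy();
 */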

#endif  // DALVIK_INDIRECTREFTABLE_H_