/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include <sys/mman.h>

#include <cstdlib>
#include <cstring>
#include <limits>

#include "android-base/stringprintf.h"

#include "base/bit_utils.h"
#include "base/globals.h"
#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "indirect_reference_table.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;

const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kJniTransitionOrInvalid:
      return "JniTransitionOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

// Mmap an "indirect ref table" region. table_bytes must be a multiple of the page size.
static inline MemMap NewIRTMap(size_t table_bytes, std::string* error_msg) {
  MemMap result = MemMap::MapAnonymous("indirect ref table",
                                       table_bytes,
                                       PROT_READ | PROT_WRITE,
                                       /*low_4gb=*/ false,
                                       error_msg);
  if (!result.IsValid() && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }
  return result;
}
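
// SmallIrtAllocator carves small tables (kSmallIrtEntries entries each) out of shared pages
// and hands them out through a simple free list, so that each small table does not need its
// own MemMap; see the class declaration in the header for the authoritative description.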

SmallIrtAllocator::SmallIrtAllocator()
    : small_irt_freelist_(nullptr), lock_("Small IRT table lock", LockLevel::kGenericBottomLock) {
}

// Allocate an IRT table for kSmallIrtEntries.
IrtEntry* SmallIrtAllocator::Allocate(std::string* error_msg) {
  MutexLock lock(Thread::Current(), lock_);
  if (small_irt_freelist_ == nullptr) {
    // Refill: map a fresh page and thread a free list through it, with each
    // kInitialIrtBytes-sized chunk pointing to the next. The last chunk's pointer stays
    // null (anonymous pages are zero-filled), terminating the list.
    MemMap map = NewIRTMap(kPageSize, error_msg);
    if (map.IsValid()) {
      small_irt_freelist_ = reinterpret_cast<IrtEntry*>(map.Begin());
      for (uint8_t* p = map.Begin(); p + kInitialIrtBytes < map.End(); p += kInitialIrtBytes) {
        *reinterpret_cast<IrtEntry**>(p) = reinterpret_cast<IrtEntry*>(p + kInitialIrtBytes);
      }
      shared_irt_maps_.emplace_back(std::move(map));
    }
  }
  if (small_irt_freelist_ == nullptr) {
    return nullptr;
  }
  IrtEntry* result = small_irt_freelist_;
  small_irt_freelist_ = *reinterpret_cast<IrtEntry**>(small_irt_freelist_);
  // Clear the free-list pointer in the first entry.
  new(result) IrtEntry();
  return result;
}

void SmallIrtAllocator::Deallocate(IrtEntry* unneeded) {
  MutexLock lock(Thread::Current(), lock_);
  *reinterpret_cast<IrtEntry**>(unneeded) = small_irt_freelist_;
  small_irt_freelist_ = unneeded;
}

IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      table_(nullptr),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kJniTransitionOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  if (max_entries_ <= kSmallIrtEntries) {
    table_ = Runtime::Current()->GetSmallIrtAllocator()->Allocate(error_msg);
    if (table_ != nullptr) {
      max_entries_ = kSmallIrtEntries;
      // table_mem_map_ remains invalid.
    }
  }
  if (table_ == nullptr) {
    const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);
    table_mem_map_ = NewIRTMap(table_bytes, error_msg);
    if (!table_mem_map_.IsValid() && error_msg->empty()) {
      *error_msg = "Unable to map memory for indirect ref table";
    }

    if (table_mem_map_.IsValid()) {
      table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
    } else {
      table_ = nullptr;
    }
    // Take the actual (page-rounded) length into account.
    max_entries_ = table_bytes / sizeof(IrtEntry);
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
}

IndirectReferenceTable::~IndirectReferenceTable() {
  if (table_ != nullptr && !table_mem_map_.IsValid()) {
    Runtime::Current()->GetSmallIrtAllocator()->Deallocate(table_);
  }
}

void IndirectReferenceTable::ConstexprChecks() {
  // Use this for some assertions. They can't be put into the header as C++ wants the class
  // to be complete.

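  // For orientation (see the header for the exact bit layout): an IndirectRef packs a table
  // index, a serial number used to detect stale references, and the IndirectRefKind into one
  // pointer-sized value, with the kind in the low bits covered by kKindMask. The checks below
  // only verify that the encodings round-trip.
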
  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}

bool IndirectReferenceTable::IsValid() const {
  return table_ != nullptr;
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find them quickly, as
// they are usually near the end of the table (see the header; TODO: verify this assumption). To
// avoid scans when there are no holes, the number of known holes should be tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// This constrained the maximum number of references to 16 bits. We want to relax this, as it
// is easy to require more references (e.g., to list all classes in large applications). Thus,
// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
//
// Thus, the hole count is a local property of the current segment, and needs to be recovered
// when (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable),
// we cannot do work when the segment changes. Thus, Add and Remove need to ensure that the
// current hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that can describe
// the known segment. This is last_known_previous_state_. The requirement will become clear with
// the following (some non-trivial) cases that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition is simply that the last known state is greater than or
// equal to the current previous state, and smaller than the current state (top index). The
// condition is conservative as it adds O(1) overhead to operations on an empty segment.

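// An illustrative example of how Add and Remove maintain the hole count within one segment
// (names and indices are hypothetical):
//
//   Add(A); Add(B); Add(C);  // top_index == 3, current_num_holes_ == 0
//   Remove(B);               // the entry at index 1 becomes a hole: top_index == 3, holes == 1
//   Add(D);                  // the scan fills the hole at index 1: top_index == 3, holes == 0
//   Remove(D);               // again a hole at index 1: top_index == 3, holes == 1
//   Remove(C);               // top entry; the collapse also eats the hole: top_index == 1
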
static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}

bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = RoundUp(new_size * sizeof(IrtEntry), kPageSize);

  MemMap new_map = NewIRTMap(table_bytes, error_msg);
  if (!new_map.IsValid()) {
    return false;
  }

  memcpy(new_map.Begin(), table_, max_entries_ * sizeof(IrtEntry));
  if (!table_mem_map_.IsValid()) {
    // Didn't have its own map; deallocate old table.
    Runtime::Current()->GetSmallIrtAllocator()->Deallocate(table_);
  }
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  const size_t real_new_size = table_bytes / sizeof(IrtEntry);
  DCHECK_GE(real_new_size, new_size);
  max_entries_ = real_new_size;

  return true;
}

IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double the space.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}

void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything. This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; it is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
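// For example (values hypothetical): with bottom_index == 2 and top_index == 6, removing the
// entry at index 5 lowers top_index, also consuming any holes directly below it; removing the
// entry at index 3 merely nulls it and increments current_num_holes_; and an index outside
// [2, 6) is rejected with a warning.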
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  // TODO: We should eagerly check the ref kind against `kind_` instead of
  // relying on this weak check and postponing the rest until `CheckEntry()` below.
  // Passing the wrong kind will currently result in misleading warnings.
  if (GetIndirectRefKind(iref) == kJniTransitionOrInvalid) {
    auto* self = Thread::Current();
    ScopedObjectAccess soa(self);
    if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }

  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}

void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  if (!table_mem_map_.IsValid()) {
    // Small table; nothing to do here.
    return;
  }
  const size_t top_index = Capacity();
  uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
  DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
  DCHECK_ALIGNED(release_end, kPageSize);
  DCHECK_ALIGNED(release_end - release_start, kPageSize);
  if (release_start != release_end) {
    // Return the unused tail pages to the kernel; the mapping stays valid and the pages
    // read back as zero if touched again.
    madvise(release_start, release_end - release_start, MADV_DONTNEED);
  }
}

void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    // Read once without a read barrier for the cheap null check, then again with the read
    // barrier to obtain a reference that is safe to dump.
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  DCHECK_GE(free_capacity, static_cast<size_t>(1));
  if (free_capacity > kMaxTableSizeInBytes) {
    // Arithmetic might even overflow.
    *error_msg = "Requested table size implausibly large";
    return false;
  }
  size_t top_index = segment_state_.top_index;
  if (top_index + free_capacity <= max_entries_) {
    return true;
  }

  // We only do a simple best effort here, ensuring the asked-for capacity at the end.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.
  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art