/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "local_reference_table-inl.h"

#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/globals.h"
#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "indirect_reference_table.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art {
namespace jni {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugLRT = false;

// Mmap a "local ref table" region. `table_bytes` is a multiple of the page size.
static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
  return MemMap::MapAnonymous("local ref table",
                              table_bytes,
                              PROT_READ | PROT_WRITE,
                              /*low_4gb=*/ false,
                              error_msg);
}

SmallLrtAllocator::SmallLrtAllocator()
    : free_lists_(kNumSlots, nullptr),
      shared_lrt_maps_(),
      lock_("Small LRT allocator lock", LockLevel::kGenericBottomLock) {
}

inline size_t SmallLrtAllocator::GetIndex(size_t size) {
  DCHECK_GE(size, kSmallLrtEntries);
  DCHECK_LT(size, kPageSize / sizeof(LrtEntry));
  DCHECK(IsPowerOfTwo(size));
  size_t index = WhichPowerOf2(size / kSmallLrtEntries);
  DCHECK_LT(index, kNumSlots);
  return index;
}

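// Allocate a table of `size` entries. Serve it from the free list for that size if possible;
// otherwise take the smallest larger free block (or map a fresh page) and split it in half
// repeatedly, buddy-style, putting the unused halves on the corresponding free lists.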
LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
  size_t index = GetIndex(size);
  MutexLock lock(Thread::Current(), lock_);
  size_t fill_from = index;
  while (fill_from != kNumSlots && free_lists_[fill_from] == nullptr) {
    ++fill_from;
  }
  void* result = nullptr;
  if (fill_from != kNumSlots) {
    // We found a slot with enough memory.
    result = free_lists_[fill_from];
    free_lists_[fill_from] = *reinterpret_cast<void**>(result);
  } else {
    // We need to allocate a new page and split it into smaller pieces.
    MemMap map = NewLRTMap(kPageSize, error_msg);
    if (!map.IsValid()) {
      return nullptr;
    }
    result = map.Begin();
    shared_lrt_maps_.emplace_back(std::move(map));
  }
  while (fill_from != index) {
    --fill_from;
    // Store the second half of the current buffer in the appropriate free list slot.
    void* mid = reinterpret_cast<uint8_t*>(result) + (kInitialLrtBytes << fill_from);
    DCHECK(free_lists_[fill_from] == nullptr);
    *reinterpret_cast<void**>(mid) = nullptr;
    free_lists_[fill_from] = mid;
  }
  // Clear the memory we return to the caller.
  std::memset(result, 0, kInitialLrtBytes << index);
  return reinterpret_cast<LrtEntry*>(result);
}

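// Return a table of `size` entries to the allocator. Merge the block with its buddy (found by
// XOR-ing the block address with the block size) while the buddy is also free, then either
// push the merged block on the matching free list or unmap the now fully free page.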
void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
  size_t index = GetIndex(size);
  MutexLock lock(Thread::Current(), lock_);
  while (index < kNumSlots) {
    // Check if we can merge this free block with another block with the same size.
    void** other = reinterpret_cast<void**>(
        reinterpret_cast<uintptr_t>(unneeded) ^ (kInitialLrtBytes << index));
    void** before = &free_lists_[index];
    if (index + 1u == kNumSlots && *before == other && *other == nullptr) {
      // Do not unmap the page if we do not have other free blocks with index `kNumSlots - 1`.
      // (Keep at least one free block to avoid a situation where creating and destroying a single
      // thread with no local references would map and unmap a page in the `SmallLrtAllocator`.)
      break;
    }
    while (*before != nullptr && *before != other) {
      before = reinterpret_cast<void**>(*before);
    }
    if (*before == nullptr) {
      break;
    }
    // Remove `other` from the free list and merge it with the `unneeded` block.
    DCHECK(*before == other);
    *before = *reinterpret_cast<void**>(other);
    ++index;
    unneeded = reinterpret_cast<LrtEntry*>(
        reinterpret_cast<uintptr_t>(unneeded) & reinterpret_cast<uintptr_t>(other));
  }
  if (index == kNumSlots) {
    // Free the entire page.
    DCHECK(free_lists_[kNumSlots - 1u] != nullptr);
    auto match = [=](MemMap& map) { return unneeded == reinterpret_cast<LrtEntry*>(map.Begin()); };
    auto it = std::find_if(shared_lrt_maps_.begin(), shared_lrt_maps_.end(), match);
    DCHECK(it != shared_lrt_maps_.end());
    shared_lrt_maps_.erase(it);
    DCHECK(!shared_lrt_maps_.empty());
    return;
  }
  *reinterpret_cast<void**>(unneeded) = free_lists_[index];
  free_lists_[index] = unneeded;
}

LocalReferenceTable::LocalReferenceTable(bool check_jni)
    : segment_state_(kLRTFirstSegment),
      max_entries_(0u),
      free_entries_list_(
          FirstFreeField::Update(kFreeListEnd, check_jni ? 1u << kFlagCheckJni : 0u)),
      small_table_(nullptr),
      tables_(),
      table_mem_maps_() {
}

void LocalReferenceTable::SetCheckJniEnabled(bool enabled) {
  free_entries_list_ =
      (free_entries_list_ & ~(1u << kFlagCheckJni)) | (enabled ? 1u << kFlagCheckJni : 0u);
}

bool LocalReferenceTable::Initialize(size_t max_count, std::string* error_msg) {
  CHECK(error_msg != nullptr);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry));
  if (IsCheckJniEnabled()) {
    CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry) / kCheckJniEntriesPerReference);
    max_count *= kCheckJniEntriesPerReference;
  }

  SmallLrtAllocator* small_lrt_allocator = Runtime::Current()->GetSmallLrtAllocator();
  LrtEntry* first_table = small_lrt_allocator->Allocate(kSmallLrtEntries, error_msg);
  if (first_table == nullptr) {
    DCHECK(!error_msg->empty());
    return false;
  }
  DCHECK_ALIGNED(first_table, kCheckJniEntriesPerReference * sizeof(LrtEntry));
  small_table_ = first_table;
  max_entries_ = kSmallLrtEntries;
  return (max_count <= kSmallLrtEntries) || Resize(max_count, error_msg);
}

LocalReferenceTable::~LocalReferenceTable() {
  SmallLrtAllocator* small_lrt_allocator =
      max_entries_ != 0u ? Runtime::Current()->GetSmallLrtAllocator() : nullptr;
  if (small_table_ != nullptr) {
    small_lrt_allocator->Deallocate(small_table_, kSmallLrtEntries);
    DCHECK(tables_.empty());
  } else {
    size_t num_small_tables = std::min(tables_.size(), MaxSmallTables());
    for (size_t i = 0; i != num_small_tables; ++i) {
      small_lrt_allocator->Deallocate(tables_[i], GetTableSize(i));
    }
  }
}

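// Grow the table to hold at least `new_size` entries. Capacity is added by appending
// power-of-two-sized sub-tables; existing entries are never moved, so references that were
// already handed out remain valid.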
bool LocalReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  DCHECK_GE(max_entries_, kSmallLrtEntries);
  DCHECK(IsPowerOfTwo(max_entries_));
  DCHECK_GT(new_size, max_entries_);
  DCHECK_LE(new_size, kMaxTableSizeInBytes / sizeof(LrtEntry));
  size_t required_size = RoundUpToPowerOfTwo(new_size);
  size_t num_required_tables = NumTablesForSize(required_size);
  DCHECK_GE(num_required_tables, 2u);
  // Delay moving the `small_table_` to `tables_` until after the next table allocation succeeds.
  size_t num_tables = (small_table_ != nullptr) ? 1u : tables_.size();
  DCHECK_EQ(num_tables, NumTablesForSize(max_entries_));
  for (; num_tables != num_required_tables; ++num_tables) {
    size_t new_table_size = GetTableSize(num_tables);
    if (num_tables < MaxSmallTables()) {
      SmallLrtAllocator* small_lrt_allocator = Runtime::Current()->GetSmallLrtAllocator();
      LrtEntry* new_table = small_lrt_allocator->Allocate(new_table_size, error_msg);
      if (new_table == nullptr) {
        DCHECK(!error_msg->empty());
        return false;
      }
      DCHECK_ALIGNED(new_table, kCheckJniEntriesPerReference * sizeof(LrtEntry));
      tables_.push_back(new_table);
    } else {
      MemMap new_map = NewLRTMap(new_table_size * sizeof(LrtEntry), error_msg);
      if (!new_map.IsValid()) {
        DCHECK(!error_msg->empty());
        return false;
      }
      DCHECK_ALIGNED(new_map.Begin(), kCheckJniEntriesPerReference * sizeof(LrtEntry));
      tables_.push_back(reinterpret_cast<LrtEntry*>(new_map.Begin()));
      table_mem_maps_.push_back(std::move(new_map));
    }
    DCHECK_EQ(num_tables == 1u, small_table_ != nullptr);
    if (num_tables == 1u) {
      tables_.insert(tables_.begin(), small_table_);
      small_table_ = nullptr;
    }
    // Record the new available capacity after each successful allocation.
    DCHECK_EQ(max_entries_, new_table_size);
    max_entries_ = 2u * new_table_size;
  }
  DCHECK_EQ(num_required_tables, tables_.size());
  return true;
}

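// Drop the leading run of free-list entries with indexes at or above the current `top_index`.
// Such holes belong to segments that have already been popped, so they are no longer part of
// the table and must not be reused by `Add()`.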
template <typename EntryGetter>
inline void LocalReferenceTable::PrunePoppedFreeEntries(EntryGetter&& get_entry) {
  const uint32_t top_index = segment_state_.top_index;
  uint32_t free_entries_list = free_entries_list_;
  uint32_t free_entry_index = FirstFreeField::Decode(free_entries_list);
  DCHECK_NE(free_entry_index, kFreeListEnd);
  DCHECK_GE(free_entry_index, top_index);
  do {
    free_entry_index = get_entry(free_entry_index)->GetNextFree();
  } while (free_entry_index != kFreeListEnd && free_entry_index >= top_index);
  free_entries_list_ = FirstFreeField::Update(free_entry_index, free_entries_list);
}

inline uint32_t LocalReferenceTable::IncrementSerialNumber(LrtEntry* serial_number_entry) {
  DCHECK_EQ(serial_number_entry, GetCheckJniSerialNumberEntry(serial_number_entry));
  // The old serial number can be 0 if it was not used before. It can also be bits from the
  // representation of an object reference, or a link to the next free entry written in this
  // slot before enabling CheckJNI. (Some gtests repeatedly enable and disable CheckJNI.)
  uint32_t old_serial_number =
      serial_number_entry->GetSerialNumberUnchecked() % kCheckJniEntriesPerReference;
  uint32_t new_serial_number =
      (old_serial_number + 1u) != kCheckJniEntriesPerReference ? old_serial_number + 1u : 1u;
  DCHECK(IsValidSerialNumber(new_serial_number));
  serial_number_entry->SetSerialNumber(new_serial_number);
  return new_serial_number;
}

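// Add a local reference for `obj` to the segment that starts at `previous_state`. Try the
// small-table fast path first, then reuse a hole from the current segment's free list, and
// finally bump `top_index`, growing the table if needed. Returns null and sets `*error_msg`
// on overflow.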
IndirectRef LocalReferenceTable::Add(LRTSegmentState previous_state,
                                     ObjPtr<mirror::Object> obj,
                                     std::string* error_msg) {
  if (kDebugLRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index;
  }

  DCHECK(obj != nullptr);
  VerifyObject(obj);

  DCHECK_LE(previous_state.top_index, segment_state_.top_index);
  DCHECK(max_entries_ == kSmallLrtEntries ? small_table_ != nullptr : !tables_.empty());

  auto store_obj = [obj, this](LrtEntry* free_entry, const char* tag)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    free_entry->SetReference(obj);
    IndirectRef result = ToIndirectRef(free_entry);
    if (kDebugLRT) {
      LOG(INFO) << "+++ " << tag << ": added at index " << GetReferenceEntryIndex(result)
                << ", top=" << segment_state_.top_index;
    }
    return result;
  };

  // Fast-path for small table with CheckJNI disabled.
  uint32_t top_index = segment_state_.top_index;
  LrtEntry* const small_table = small_table_;
  if (LIKELY(small_table != nullptr)) {
    DCHECK_EQ(max_entries_, kSmallLrtEntries);
    DCHECK_LE(segment_state_.top_index, kSmallLrtEntries);
    auto get_entry = [small_table](uint32_t index) ALWAYS_INLINE {
      DCHECK_LT(index, kSmallLrtEntries);
      return &small_table[index];
    };
    if (LIKELY(free_entries_list_ == kEmptyFreeListAndCheckJniDisabled)) {
      if (LIKELY(top_index != kSmallLrtEntries)) {
        LrtEntry* free_entry = get_entry(top_index);
        segment_state_.top_index = top_index + 1u;
        return store_obj(free_entry, "small_table/empty-free-list");
      }
    } else if (LIKELY(!IsCheckJniEnabled())) {
      uint32_t first_free_index = GetFirstFreeIndex();
      DCHECK_NE(first_free_index, kFreeListEnd);
      if (UNLIKELY(first_free_index >= top_index)) {
        PrunePoppedFreeEntries(get_entry);
        first_free_index = GetFirstFreeIndex();
      }
      if (first_free_index != kFreeListEnd && first_free_index >= previous_state.top_index) {
        DCHECK_LT(first_free_index, segment_state_.top_index);  // Popped entries pruned above.
        LrtEntry* free_entry = get_entry(first_free_index);
        // Use the `free_entry` only if it was created with CheckJNI disabled.
        LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(free_entry);
        if (!serial_number_entry->IsSerialNumber()) {
          free_entries_list_ = FirstFreeField::Update(free_entry->GetNextFree(), 0u);
          return store_obj(free_entry, "small_table/reuse-empty-slot");
        }
      }
      if (top_index != kSmallLrtEntries) {
        LrtEntry* free_entry = get_entry(top_index);
        segment_state_.top_index = top_index + 1u;
        return store_obj(free_entry, "small_table/pruned-free-list");
      }
    }
  }
  DCHECK(IsCheckJniEnabled() || small_table == nullptr || top_index == kSmallLrtEntries);

  // Process free list: prune, reuse free entry or pad for CheckJNI.
  uint32_t first_free_index = GetFirstFreeIndex();
  if (first_free_index != kFreeListEnd && first_free_index >= top_index) {
    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
    first_free_index = GetFirstFreeIndex();
  }
  if (first_free_index != kFreeListEnd && first_free_index >= previous_state.top_index) {
    // Reuse the free entry if it was created with the same CheckJNI setting.
    DCHECK_LT(first_free_index, top_index);  // Popped entries have been pruned above.
    LrtEntry* free_entry = GetEntry(first_free_index);
    LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(free_entry);
    if (serial_number_entry->IsSerialNumber() == IsCheckJniEnabled()) {
      free_entries_list_ = FirstFreeField::Update(free_entry->GetNextFree(), free_entries_list_);
      if (UNLIKELY(IsCheckJniEnabled())) {
        DCHECK_NE(free_entry, serial_number_entry);
        uint32_t serial_number = IncrementSerialNumber(serial_number_entry);
        free_entry = serial_number_entry + serial_number;
        DCHECK_EQ(
            free_entry,
            GetEntry(RoundDown(first_free_index, kCheckJniEntriesPerReference) + serial_number));
      }
      return store_obj(free_entry, "reuse-empty-slot");
    }
  }
  if (UNLIKELY(IsCheckJniEnabled()) && !IsAligned<kCheckJniEntriesPerReference>(top_index)) {
    // Add non-CheckJNI holes up to the next serial number entry.
    for (; !IsAligned<kCheckJniEntriesPerReference>(top_index); ++top_index) {
      GetEntry(top_index)->SetNextFree(first_free_index);
      first_free_index = top_index;
    }
    free_entries_list_ = FirstFreeField::Update(first_free_index, 1u << kFlagCheckJni);
    segment_state_.top_index = top_index;
  }

  // Resize (double the space) if needed.
  if (UNLIKELY(top_index == max_entries_)) {
    static_assert(IsPowerOfTwo(kMaxTableSizeInBytes));
    static_assert(IsPowerOfTwo(sizeof(LrtEntry)));
    DCHECK(IsPowerOfTwo(max_entries_));
    if (kMaxTableSizeInBytes == max_entries_ * sizeof(LrtEntry)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<LocalReferenceTable>(*this)
          << " Resizing failed: Cannot resize over the maximum permitted size.";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2u, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<LocalReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  // Use the next entry.
  if (UNLIKELY(IsCheckJniEnabled())) {
    DCHECK_ALIGNED(top_index, kCheckJniEntriesPerReference);
    DCHECK_ALIGNED(previous_state.top_index, kCheckJniEntriesPerReference);
    DCHECK_ALIGNED(max_entries_, kCheckJniEntriesPerReference);
    LrtEntry* serial_number_entry = GetEntry(top_index);
    uint32_t serial_number = IncrementSerialNumber(serial_number_entry);
    LrtEntry* free_entry = serial_number_entry + serial_number;
    DCHECK_EQ(free_entry, GetEntry(top_index + serial_number));
    segment_state_.top_index = top_index + kCheckJniEntriesPerReference;
    return store_obj(free_entry, "slow-path/check-jni");
  }
  LrtEntry* free_entry = GetEntry(top_index);
  segment_state_.top_index = top_index + 1u;
  return store_obj(free_entry, "slow-path");
}

// Removes an object.
//
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
//
// If the entry is not at the top, we just add it to the free entry list.
// If the entry is at the top, we pop it from the top and check if there are
// free entries under it to remove in order to reduce the size of the table.
//
// Returns "false" if nothing was removed.
bool LocalReferenceTable::Remove(LRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugLRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index;
  }

  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(iref);
  if (UNLIKELY(kind != kLocal)) {
    Thread* self = Thread::Current();
    if (kind == kJniTransition) {
      if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
        // Transition references count as local but they cannot be deleted.
        // TODO: They could actually be cleared on the stack, except for the `jclass`
        // reference for static methods that points to the method's declaring class.
        JNIEnvExt* env = self->GetJniEnv();
        DCHECK(env != nullptr);
        if (env->IsCheckJniEnabled()) {
          const char* msg = kDumpStackOnNonLocalReference
              ? "Attempt to remove non-JNI local reference, dumping thread"
              : "Attempt to remove non-JNI local reference";
          LOG(WARNING) << msg;
          if (kDumpStackOnNonLocalReference) {
            self->Dump(LOG_STREAM(WARNING));
          }
        }
        return true;
      }
    }
    if (kDumpStackOnNonLocalReference && IsCheckJniEnabled()) {
      // Log the error message and stack. Repeat the message as FATAL later.
      LOG(ERROR) << "Attempt to delete " << kind
                 << " reference as local JNI reference, dumping stack";
      self->Dump(LOG_STREAM(ERROR));
    }
    LOG(IsCheckJniEnabled() ? ERROR : FATAL)
        << "Attempt to delete " << kind << " reference as local JNI reference";
    return false;
  }

  DCHECK_LE(previous_state.top_index, segment_state_.top_index);
  DCHECK(max_entries_ == kSmallLrtEntries ? small_table_ != nullptr : !tables_.empty());
  DCheckValidReference(iref);

  LrtEntry* entry = ToLrtEntry(iref);
  uint32_t entry_index = GetReferenceEntryIndex(iref);
  uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  if (entry_index < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << entry_index
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }

  if (UNLIKELY(IsCheckJniEnabled())) {
    // Ignore invalid references. CheckJNI should have aborted before passing this reference
    // to `LocalReferenceTable::Remove()` but gtests intercept the abort and proceed anyway.
    std::string error_msg;
    if (!IsValidReference(iref, &error_msg)) {
      LOG(WARNING) << "Attempt to remove invalid reference: " << error_msg;
      return false;
    }
  }
  DCHECK_LT(entry_index, top_index);

  // Prune the free entry list if a segment with holes was popped before the `Remove()` call.
  uint32_t first_free_index = GetFirstFreeIndex();
  if (first_free_index != kFreeListEnd && first_free_index >= top_index) {
    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
  }

  // Check if we're removing the top entry (created with any CheckJNI setting).
  bool is_top_entry = false;
  uint32_t prune_end = entry_index;
  if (GetCheckJniSerialNumberEntry(entry)->IsSerialNumber()) {
    LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(entry);
    uint32_t serial_number = dchecked_integral_cast<uint32_t>(entry - serial_number_entry);
    DCHECK_EQ(serial_number, serial_number_entry->GetSerialNumber());
    prune_end = entry_index - serial_number;
    is_top_entry = (prune_end == top_index - kCheckJniEntriesPerReference);
  } else {
    is_top_entry = (entry_index == top_index - 1u);
  }
  if (is_top_entry) {
    // Top-most entry. Scan up and consume holes created with the current CheckJNI setting.
    constexpr uint32_t kDeadLocalValue = 0xdead10c0;
    entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));

    // TODO: Maybe we should not prune free entries from the top of the segment
    // because it has quadratic worst-case complexity. We could still prune while
    // the first free list entry is at the top.
    uint32_t prune_start = prune_end;
    size_t prune_count;
    auto find_prune_range = [&](size_t chunk_size, auto is_prev_entry_free) {
      while (prune_start > bottom_index && is_prev_entry_free(prune_start)) {
        prune_start -= chunk_size;
      }
      prune_count = (prune_end - prune_start) / chunk_size;
    };

    if (UNLIKELY(IsCheckJniEnabled())) {
      auto is_prev_entry_free = [&](size_t index) {
        DCHECK_ALIGNED(index, kCheckJniEntriesPerReference);
        LrtEntry* serial_number_entry = GetEntry(index - kCheckJniEntriesPerReference);
        DCHECK_ALIGNED(serial_number_entry, kCheckJniEntriesPerReference * sizeof(LrtEntry));
        if (!serial_number_entry->IsSerialNumber()) {
          return false;
        }
        uint32_t serial_number = serial_number_entry->GetSerialNumber();
        DCHECK(IsValidSerialNumber(serial_number));
        LrtEntry* entry = serial_number_entry + serial_number;
        DCHECK_EQ(entry, GetEntry(prune_start - kCheckJniEntriesPerReference + serial_number));
        return entry->IsFree();
      };
      find_prune_range(kCheckJniEntriesPerReference, is_prev_entry_free);
    } else {
      auto is_prev_entry_free = [&](size_t index) {
        LrtEntry* entry = GetEntry(index - 1u);
        return entry->IsFree() && !GetCheckJniSerialNumberEntry(entry)->IsSerialNumber();
      };
      find_prune_range(1u, is_prev_entry_free);
    }

    if (prune_count != 0u) {
      // Remove pruned entries from the free list.
      size_t remaining = prune_count;
      uint32_t free_index = GetFirstFreeIndex();
      while (remaining != 0u && free_index >= prune_start) {
        DCHECK_NE(free_index, kFreeListEnd);
        LrtEntry* pruned_entry = GetEntry(free_index);
        free_index = pruned_entry->GetNextFree();
        pruned_entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
        --remaining;
      }
      free_entries_list_ = FirstFreeField::Update(free_index, free_entries_list_);
      while (remaining != 0u) {
        DCHECK_NE(free_index, kFreeListEnd);
        DCHECK_LT(free_index, prune_start);
        DCHECK_GE(free_index, bottom_index);
        LrtEntry* free_entry = GetEntry(free_index);
        while (free_entry->GetNextFree() < prune_start) {
          free_index = free_entry->GetNextFree();
          DCHECK_GE(free_index, bottom_index);
          free_entry = GetEntry(free_index);
        }
        LrtEntry* pruned_entry = GetEntry(free_entry->GetNextFree());
        free_entry->SetNextFree(pruned_entry->GetNextFree());
        pruned_entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
        --remaining;
      }
      DCHECK(free_index == kFreeListEnd || free_index < prune_start)
          << "free_index=" << free_index << ", prune_start=" << prune_start;
    }
    segment_state_.top_index = prune_start;
    if (kDebugLRT) {
      LOG(INFO) << "+++ removed last entry, pruned " << prune_count
                << ", new top= " << segment_state_.top_index;
    }
  } else {
    // Not the top-most entry. This creates a hole.
    entry->SetNextFree(GetFirstFreeIndex());
    free_entries_list_ = FirstFreeField::Update(entry_index, free_entries_list_);
    if (kDebugLRT) {
      LOG(INFO) << "+++ removed entry and left hole at " << entry_index;
    }
  }

  return true;
}

void LocalReferenceTable::AssertEmpty() {
  CHECK_EQ(Capacity(), 0u) << "Internal Error: non-empty local reference table.";
}

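// Release the memory of table pages above the current `top_index` back to the kernel with
// `madvise(MADV_DONTNEED)`. Only the large, `MemMap`-backed tables are trimmed; the small
// tables from the `SmallLrtAllocator` share pages and are left alone.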
void LocalReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t num_mem_maps = table_mem_maps_.size();
  if (num_mem_maps == 0u) {
    // Only small tables; nothing to do here. (Do not unnecessarily prune popped free entries.)
    return;
  }
  DCHECK_EQ(tables_.size(), num_mem_maps + MaxSmallTables());
  const size_t top_index = segment_state_.top_index;
  // Prune popped free entries before potentially losing their memory.
  if (UNLIKELY(GetFirstFreeIndex() != kFreeListEnd) &&
      UNLIKELY(GetFirstFreeIndex() >= segment_state_.top_index)) {
    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
  }
  // Small tables can hold as many entries as the next table.
  constexpr size_t kSmallTablesCapacity = GetTableSize(MaxSmallTables());
  size_t mem_map_index = 0u;
  if (top_index > kSmallTablesCapacity) {
    const size_t table_size = TruncToPowerOfTwo(top_index);
    const size_t table_index = NumTablesForSize(table_size);
    const size_t start_index = top_index - table_size;
    mem_map_index = table_index - MaxSmallTables();
    if (start_index != 0u) {
      ++mem_map_index;
      LrtEntry* table = tables_[table_index];
      uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table[start_index]), kPageSize);
      uint8_t* release_end = reinterpret_cast<uint8_t*>(&table[table_size]);
      DCHECK_GE(reinterpret_cast<uintptr_t>(release_end),
                reinterpret_cast<uintptr_t>(release_start));
      DCHECK_ALIGNED(release_end, kPageSize);
      DCHECK_ALIGNED(release_end - release_start, kPageSize);
      if (release_start != release_end) {
        madvise(release_start, release_end - release_start, MADV_DONTNEED);
      }
    }
  }
  for (MemMap& mem_map : ArrayRef<MemMap>(table_mem_maps_).SubArray(mem_map_index)) {
    madvise(mem_map.Begin(), mem_map.Size(), MADV_DONTNEED);
  }
}

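// Visit the object roots of all used entries up to `top_index`, skipping serial number
// entries (CheckJNI mode) and free-list holes.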
template <typename Visitor>
void LocalReferenceTable::VisitRootsInternal(Visitor&& visitor) const {
  auto visit_table = [&](LrtEntry* table, size_t count) REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i != count; ) {
      LrtEntry* entry;
      if (i % kCheckJniEntriesPerReference == 0u && table[i].IsSerialNumber()) {
        entry = &table[i + table[i].GetSerialNumber()];
        i += kCheckJniEntriesPerReference;
        DCHECK_LE(i, count);
      } else {
        entry = &table[i];
        i += 1u;
      }
      DCHECK(!entry->IsSerialNumber());
      if (!entry->IsFree()) {
        GcRoot<mirror::Object>* root = entry->GetRootAddress();
        DCHECK(!root->IsNull());
        visitor(root);
      }
    }
  };

  if (small_table_ != nullptr) {
    visit_table(small_table_, segment_state_.top_index);
  } else {
    uint32_t remaining = segment_state_.top_index;
    size_t table_index = 0u;
    while (remaining != 0u) {
      size_t count = std::min<size_t>(remaining, GetTableSize(table_index));
      visit_table(tables_[table_index], count);
      ++table_index;
      remaining -= count;
    }
  }
}

void LocalReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  VisitRootsInternal([&](GcRoot<mirror::Object>* root) REQUIRES_SHARED(Locks::mutator_lock_) {
                       root_visitor.VisitRoot(*root);
                     });
}

void LocalReferenceTable::Dump(std::ostream& os) const {
  os << kLocal << " table dump:\n";
  ReferenceTable::Table entries;
  VisitRootsInternal([&](GcRoot<mirror::Object>* root) REQUIRES_SHARED(Locks::mutator_lock_) {
                       entries.push_back(*root);
                     });
  ReferenceTable::Dump(os, entries);
}

void LocalReferenceTable::SetSegmentState(LRTSegmentState new_state) {
  if (kDebugLRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

bool LocalReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  // TODO: Pass `previous_state` so that we can check holes.
  DCHECK_GE(free_capacity, static_cast<size_t>(1));
  size_t top_index = segment_state_.top_index;
  DCHECK_LE(top_index, max_entries_);

  if (IsCheckJniEnabled()) {
    // High values lead to the maximum size check failing below.
    if (free_capacity >= std::numeric_limits<size_t>::max() / kCheckJniEntriesPerReference) {
      free_capacity = std::numeric_limits<size_t>::max();
    } else {
      free_capacity *= kCheckJniEntriesPerReference;
    }
  }

  // TODO: Include holes from the current segment in the calculation.
  if (free_capacity <= max_entries_ - top_index) {
    return true;
  }

  if (free_capacity > kMaxTableSize - top_index) {
    *error_msg = android::base::StringPrintf(
        "Requested size exceeds maximum: %zu > %zu (%zu used)",
        free_capacity,
        kMaxTableSize - top_index,
        top_index);
    return false;
  }

  // Try to increase the table size.
  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<LocalReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t LocalReferenceTable::FreeCapacity() const {
  // TODO: Include holes in current segment.
  if (IsCheckJniEnabled()) {
    DCHECK_ALIGNED(max_entries_, kCheckJniEntriesPerReference);
    // The `segment_state_.top_index` is not necessarily aligned; rounding down.
    return (max_entries_ - segment_state_.top_index) / kCheckJniEntriesPerReference;
  } else {
    return max_entries_ - segment_state_.top_index;
  }
}

}  // namespace jni
}  // namespace art