/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "local_reference_table-inl.h"

#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/globals.h"
#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "indirect_reference_table.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art HIDDEN {
namespace jni {

static constexpr bool kDumpStackOnNonLocalReference = false;

// Mmap a "local ref table" region. `table_bytes` is a multiple of the page size.
static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
  return MemMap::MapAnonymous("local ref table",
                              table_bytes,
                              PROT_READ | PROT_WRITE,
                              /*low_4gb=*/ false,
                              error_msg);
}

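// The `SmallLrtAllocator` hands out sub-page blocks for small local reference tables.
// Pages are mmapped on demand and carved up buddy-style: block sizes are powers of two
// from `kSmallLrtEntries` entries up to half a page, and `free_lists_[i]` is an
// intrusive singly-linked list of free blocks of `kSmallLrtEntries << i` entries, with
// the "next" pointer stored in the first word of each free block. A fully merged page
// is returned to the OS in `Deallocate()`.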
SmallLrtAllocator::SmallLrtAllocator()
    : free_lists_(num_lrt_slots_, nullptr),
      shared_lrt_maps_(),
      lock_("Small LRT allocator lock", LockLevel::kGenericBottomLock) {
}

inline size_t SmallLrtAllocator::GetIndex(size_t size) {
  DCHECK_GE(size, kSmallLrtEntries);
  DCHECK_LT(size, gPageSize / sizeof(LrtEntry));
  DCHECK(IsPowerOfTwo(size));
  size_t index = WhichPowerOf2(size / kSmallLrtEntries);
  DCHECK_LT(index, num_lrt_slots_);
  return index;
}

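// Allocate a block of `size` entries. We pop the first free block of at least the
// requested size, mapping a fresh page if none is available, then split off the upper
// halves onto the free lists for the smaller sizes until the block matches the request.
// The returned memory is zero-initialized.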
LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
  size_t index = GetIndex(size);
  MutexLock lock(Thread::Current(), lock_);
  size_t fill_from = index;
  while (fill_from != num_lrt_slots_ && free_lists_[fill_from] == nullptr) {
    ++fill_from;
  }
  void* result = nullptr;
  if (fill_from != num_lrt_slots_) {
    // We found a slot with enough memory.
    result = free_lists_[fill_from];
    free_lists_[fill_from] = *reinterpret_cast<void**>(result);
  } else {
    // We need to allocate a new page and split it into smaller pieces.
    MemMap map = NewLRTMap(gPageSize, error_msg);
    if (!map.IsValid()) {
      return nullptr;
    }
    result = map.Begin();
    shared_lrt_maps_.emplace_back(std::move(map));
  }
  while (fill_from != index) {
    --fill_from;
    // Store the second half of the current buffer in the appropriate free list slot.
    void* mid = reinterpret_cast<uint8_t*>(result) + (kInitialLrtBytes << fill_from);
    DCHECK(free_lists_[fill_from] == nullptr);
    *reinterpret_cast<void**>(mid) = nullptr;
    free_lists_[fill_from] = mid;
  }
  // Clear the memory we return to the caller.
  std::memset(result, 0, kInitialLrtBytes << index);
  return reinterpret_cast<LrtEntry*>(result);
}

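// Return a block to the allocator. Blocks are naturally aligned within their page, so
// the buddy of a block of `2^k` bytes at offset `offset` within the page lives at
// `offset ^ 2^k`. While the buddy is also free, remove it from its free list and merge
// (the merged block starts at the lower of the two addresses, i.e. `unneeded & other`);
// once a whole page has been reassembled it is unmapped, subject to the exception below
// that keeps one page mapped.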
void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
  size_t index = GetIndex(size);
  MutexLock lock(Thread::Current(), lock_);
  while (index < num_lrt_slots_) {
    // Check if we can merge this free block with another block with the same size.
    void** other = reinterpret_cast<void**>(
        reinterpret_cast<uintptr_t>(unneeded) ^ (kInitialLrtBytes << index));
    void** before = &free_lists_[index];
    if (index + 1u == num_lrt_slots_ && *before == other && *other == nullptr) {
      // Do not unmap the page if we do not have other free blocks with index `num_lrt_slots_ - 1`.
      // (Keep at least one free block to avoid a situation where creating and destroying a single
      // thread with no local references would map and unmap a page in the `SmallLrtAllocator`.)
      break;
    }
    while (*before != nullptr && *before != other) {
      before = reinterpret_cast<void**>(*before);
    }
    if (*before == nullptr) {
      break;
    }
    // Remove `other` from the free list and merge it with the `unneeded` block.
    DCHECK(*before == other);
    *before = *reinterpret_cast<void**>(other);
    ++index;
    unneeded = reinterpret_cast<LrtEntry*>(
        reinterpret_cast<uintptr_t>(unneeded) & reinterpret_cast<uintptr_t>(other));
  }
  if (index == num_lrt_slots_) {
    // Free the entire page.
    DCHECK(free_lists_[num_lrt_slots_ - 1u] != nullptr);
    auto match = [=](MemMap& map) { return unneeded == reinterpret_cast<LrtEntry*>(map.Begin()); };
    auto it = std::find_if(shared_lrt_maps_.begin(), shared_lrt_maps_.end(), match);
    DCHECK(it != shared_lrt_maps_.end());
    shared_lrt_maps_.erase(it);
    DCHECK(!shared_lrt_maps_.empty());
    return;
  }
  *reinterpret_cast<void**>(unneeded) = free_lists_[index];
  free_lists_[index] = unneeded;
}

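// `free_entries_list_` packs the index of the first free entry (`FirstFreeField`, with
// `kFreeListEnd` meaning "empty") together with flag bits such as `kFlagCheckJni`; the
// exact bit layout is defined in the header. The rest of the free list is threaded
// through the entries themselves via `LrtEntry::GetNextFree()`/`SetNextFree()`.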
LocalReferenceTable::LocalReferenceTable(bool check_jni)
    : previous_state_(kLRTFirstSegment),
      segment_state_(kLRTFirstSegment),
      max_entries_(0u),
      free_entries_list_(
          FirstFreeField::Update(kFreeListEnd, check_jni ? 1u << kFlagCheckJni : 0u)),
      small_table_(nullptr),
      tables_(),
      table_mem_maps_() {
}

void LocalReferenceTable::SetCheckJniEnabled(bool enabled) {
  free_entries_list_ =
      (free_entries_list_ & ~(1u << kFlagCheckJni)) | (enabled ? 1u << kFlagCheckJni : 0u);
}

bool LocalReferenceTable::Initialize(size_t max_count, std::string* error_msg) {
  CHECK(error_msg != nullptr);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry));
  if (IsCheckJniEnabled()) {
    CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry) / kCheckJniEntriesPerReference);
    max_count *= kCheckJniEntriesPerReference;
  }

  SmallLrtAllocator* small_lrt_allocator = Runtime::Current()->GetSmallLrtAllocator();
  LrtEntry* first_table = small_lrt_allocator->Allocate(kSmallLrtEntries, error_msg);
  if (first_table == nullptr) {
    DCHECK(!error_msg->empty());
    return false;
  }
  DCHECK_ALIGNED(first_table, kCheckJniEntriesPerReference * sizeof(LrtEntry));
  small_table_ = first_table;
  max_entries_ = kSmallLrtEntries;
  return (max_count <= kSmallLrtEntries) || Resize(max_count, error_msg);
}

LocalReferenceTable::~LocalReferenceTable() {
  SmallLrtAllocator* small_lrt_allocator =
      max_entries_ != 0u ? Runtime::Current()->GetSmallLrtAllocator() : nullptr;
  if (small_table_ != nullptr) {
    small_lrt_allocator->Deallocate(small_table_, kSmallLrtEntries);
    DCHECK(tables_.empty());
  } else {
    size_t num_small_tables = std::min(tables_.size(), MaxSmallTables());
    for (size_t i = 0; i != num_small_tables; ++i) {
      small_lrt_allocator->Deallocate(tables_[i], GetTableSize(i));
    }
  }
}

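// Grow the table to hold at least `new_size` entries. Storage is a sequence of tables
// where each newly added table matches the current total capacity, so every added table
// doubles `max_entries_`. The first `MaxSmallTables()` tables come from the
// `SmallLrtAllocator`; larger tables each get their own anonymous mapping, recorded in
// `table_mem_maps_` so that `Trim()` can release unused pages.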
bool LocalReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  DCHECK_GE(max_entries_, kSmallLrtEntries);
  DCHECK(IsPowerOfTwo(max_entries_));
  DCHECK_GT(new_size, max_entries_);
  DCHECK_LE(new_size, kMaxTableSizeInBytes / sizeof(LrtEntry));
  size_t required_size = RoundUpToPowerOfTwo(new_size);
  size_t num_required_tables = NumTablesForSize(required_size);
  DCHECK_GE(num_required_tables, 2u);
  // Delay moving the `small_table_` to `tables_` until after the next table allocation succeeds.
  size_t num_tables = (small_table_ != nullptr) ? 1u : tables_.size();
  DCHECK_EQ(num_tables, NumTablesForSize(max_entries_));
  for (; num_tables != num_required_tables; ++num_tables) {
    size_t new_table_size = GetTableSize(num_tables);
    if (num_tables < MaxSmallTables()) {
      SmallLrtAllocator* small_lrt_allocator = Runtime::Current()->GetSmallLrtAllocator();
      LrtEntry* new_table = small_lrt_allocator->Allocate(new_table_size, error_msg);
      if (new_table == nullptr) {
        DCHECK(!error_msg->empty());
        return false;
      }
      DCHECK_ALIGNED(new_table, kCheckJniEntriesPerReference * sizeof(LrtEntry));
      tables_.push_back(new_table);
    } else {
      MemMap new_map = NewLRTMap(new_table_size * sizeof(LrtEntry), error_msg);
      if (!new_map.IsValid()) {
        DCHECK(!error_msg->empty());
        return false;
      }
      DCHECK_ALIGNED(new_map.Begin(), kCheckJniEntriesPerReference * sizeof(LrtEntry));
      tables_.push_back(reinterpret_cast<LrtEntry*>(new_map.Begin()));
      table_mem_maps_.push_back(std::move(new_map));
    }
    DCHECK_EQ(num_tables == 1u, small_table_ != nullptr);
    if (num_tables == 1u) {
      tables_.insert(tables_.begin(), small_table_);
      small_table_ = nullptr;
    }
    // Record the new available capacity after each successful allocation.
    DCHECK_EQ(max_entries_, new_table_size);
    max_entries_ = 2u * new_table_size;
  }
  DCHECK_EQ(num_required_tables, tables_.size());
  return true;
}

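// Drop the leading run of free-list entries that lie at or above the current top index.
// Such entries appear when a segment containing holes is popped; they must not be
// handed out again and would otherwise leave the list head pointing above the top.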
template <typename EntryGetter>
inline void LocalReferenceTable::PrunePoppedFreeEntries(EntryGetter&& get_entry) {
  const uint32_t top_index = segment_state_.top_index;
  uint32_t free_entries_list = free_entries_list_;
  uint32_t free_entry_index = FirstFreeField::Decode(free_entries_list);
  DCHECK_NE(free_entry_index, kFreeListEnd);
  DCHECK_GE(free_entry_index, top_index);
  do {
    free_entry_index = get_entry(free_entry_index)->GetNextFree();
  } while (free_entry_index != kFreeListEnd && free_entry_index >= top_index);
  free_entries_list_ = FirstFreeField::Update(free_entry_index, free_entries_list);
}

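// With CheckJNI enabled, each reference occupies a naturally aligned group of
// `kCheckJniEntriesPerReference` entries: the first entry of the group holds a serial
// number and the live reference is stored at that offset within the group. The serial
// number cycles through 1 .. `kCheckJniEntriesPerReference - 1u` (0 would be the serial
// number entry itself), so reusing a slot changes the resulting `IndirectRef` and makes
// previously handed out references to that slot detectably stale.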
inline uint32_t LocalReferenceTable::IncrementSerialNumber(LrtEntry* serial_number_entry) {
  DCHECK_EQ(serial_number_entry, GetCheckJniSerialNumberEntry(serial_number_entry));
  // The old serial number can be 0 if it was not used before. It can also be bits from the
  // representation of an object reference, or a link to the next free entry written in this
  // slot before enabling CheckJNI. (Some gtests repeatedly enable and disable CheckJNI.)
  uint32_t old_serial_number =
      serial_number_entry->GetSerialNumberUnchecked() % kCheckJniEntriesPerReference;
  uint32_t new_serial_number =
      (old_serial_number + 1u) != kCheckJniEntriesPerReference ? old_serial_number + 1u : 1u;
  DCHECK(IsValidSerialNumber(new_serial_number));
  serial_number_entry->SetSerialNumber(new_serial_number);
  return new_serial_number;
}

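// Add a reference and return its `IndirectRef`. The fast path serves a small table with
// CheckJNI disabled straight from the segment top or the free list. The generic path
// prunes popped free entries, tries to reuse a hole created with the same CheckJNI
// setting, pads up to the next aligned entry group if CheckJNI is enabled but the top is
// not group-aligned, resizes (doubling) when the table is full, and finally takes the
// next entry (or entry group) at the top.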
IndirectRef LocalReferenceTable::Add(ObjPtr<mirror::Object> obj, std::string* error_msg) {
  if (kDebugLRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state_.top_index
              << " top_index=" << segment_state_.top_index;
  }

  DCHECK(obj != nullptr);
  VerifyObject(obj);

  DCHECK_LE(previous_state_.top_index, segment_state_.top_index);
  DCHECK(max_entries_ == kSmallLrtEntries ? small_table_ != nullptr : !tables_.empty());

  auto store_obj = [obj, this](LrtEntry* free_entry, const char* tag)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    free_entry->SetReference(obj);
    IndirectRef result = ToIndirectRef(free_entry);
    if (kDebugLRT) {
      LOG(INFO) << "+++ " << tag << ": added at index " << GetReferenceEntryIndex(result)
                << ", top=" << segment_state_.top_index;
    }
    return result;
  };

  // Fast-path for small table with CheckJNI disabled.
  uint32_t top_index = segment_state_.top_index;
  LrtEntry* const small_table = small_table_;
  if (LIKELY(small_table != nullptr)) {
    DCHECK_EQ(max_entries_, kSmallLrtEntries);
    DCHECK_LE(segment_state_.top_index, kSmallLrtEntries);
    auto get_entry = [small_table](uint32_t index) ALWAYS_INLINE {
      DCHECK_LT(index, kSmallLrtEntries);
      return &small_table[index];
    };
    if (LIKELY(free_entries_list_ == kEmptyFreeListAndCheckJniDisabled)) {
      if (LIKELY(top_index != kSmallLrtEntries)) {
        LrtEntry* free_entry = get_entry(top_index);
        segment_state_.top_index = top_index + 1u;
        return store_obj(free_entry, "small_table/empty-free-list");
      }
    } else if (LIKELY(!IsCheckJniEnabled())) {
      uint32_t first_free_index = GetFirstFreeIndex();
      DCHECK_NE(first_free_index, kFreeListEnd);
      if (UNLIKELY(first_free_index >= top_index)) {
        PrunePoppedFreeEntries(get_entry);
        first_free_index = GetFirstFreeIndex();
      }
      if (first_free_index != kFreeListEnd && first_free_index >= previous_state_.top_index) {
        DCHECK_LT(first_free_index, segment_state_.top_index);  // Popped entries pruned above.
        LrtEntry* free_entry = get_entry(first_free_index);
        // Use the `free_entry` only if it was created with CheckJNI disabled.
        LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(free_entry);
        if (!serial_number_entry->IsSerialNumber()) {
          free_entries_list_ = FirstFreeField::Update(free_entry->GetNextFree(), 0u);
          return store_obj(free_entry, "small_table/reuse-empty-slot");
        }
      }
      if (top_index != kSmallLrtEntries) {
        LrtEntry* free_entry = get_entry(top_index);
        segment_state_.top_index = top_index + 1u;
        return store_obj(free_entry, "small_table/pruned-free-list");
      }
    }
  }
  DCHECK(IsCheckJniEnabled() || small_table == nullptr || top_index == kSmallLrtEntries);

  // Process free list: prune, reuse free entry or pad for CheckJNI.
  uint32_t first_free_index = GetFirstFreeIndex();
  if (first_free_index != kFreeListEnd && first_free_index >= top_index) {
    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
    first_free_index = GetFirstFreeIndex();
  }
  if (first_free_index != kFreeListEnd && first_free_index >= previous_state_.top_index) {
    // Reuse the free entry if it was created with the same CheckJNI setting.
    DCHECK_LT(first_free_index, top_index);  // Popped entries have been pruned above.
    LrtEntry* free_entry = GetEntry(first_free_index);
    LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(free_entry);
    if (serial_number_entry->IsSerialNumber() == IsCheckJniEnabled()) {
      free_entries_list_ = FirstFreeField::Update(free_entry->GetNextFree(), free_entries_list_);
      if (UNLIKELY(IsCheckJniEnabled())) {
        DCHECK_NE(free_entry, serial_number_entry);
        uint32_t serial_number = IncrementSerialNumber(serial_number_entry);
        free_entry = serial_number_entry + serial_number;
        DCHECK_EQ(
            free_entry,
            GetEntry(RoundDown(first_free_index, kCheckJniEntriesPerReference) + serial_number));
      }
      return store_obj(free_entry, "reuse-empty-slot");
    }
  }
  if (UNLIKELY(IsCheckJniEnabled()) && !IsAligned<kCheckJniEntriesPerReference>(top_index)) {
    // Add non-CheckJNI holes up to the next serial number entry.
    for (; !IsAligned<kCheckJniEntriesPerReference>(top_index); ++top_index) {
      GetEntry(top_index)->SetNextFree(first_free_index);
      first_free_index = top_index;
    }
    free_entries_list_ = FirstFreeField::Update(first_free_index, 1u << kFlagCheckJni);
    segment_state_.top_index = top_index;
  }

  // Resize (double the space) if needed.
  if (UNLIKELY(top_index == max_entries_)) {
    static_assert(IsPowerOfTwo(kMaxTableSizeInBytes));
    static_assert(IsPowerOfTwo(sizeof(LrtEntry)));
    DCHECK(IsPowerOfTwo(max_entries_));
    if (kMaxTableSizeInBytes == max_entries_ * sizeof(LrtEntry)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<LocalReferenceTable>(*this)
          << " Resizing failed: Cannot resize over the maximum permitted size.";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2u, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<LocalReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  // Use the next entry.
  if (UNLIKELY(IsCheckJniEnabled())) {
    DCHECK_ALIGNED(top_index, kCheckJniEntriesPerReference);
    DCHECK_ALIGNED(previous_state_.top_index, kCheckJniEntriesPerReference);
    DCHECK_ALIGNED(max_entries_, kCheckJniEntriesPerReference);
    LrtEntry* serial_number_entry = GetEntry(top_index);
    uint32_t serial_number = IncrementSerialNumber(serial_number_entry);
    LrtEntry* free_entry = serial_number_entry + serial_number;
    DCHECK_EQ(free_entry, GetEntry(top_index + serial_number));
    segment_state_.top_index = top_index + kCheckJniEntriesPerReference;
    return store_obj(free_entry, "slow-path/check-jni");
  }
  LrtEntry* free_entry = GetEntry(top_index);
  segment_state_.top_index = top_index + 1u;
  return store_obj(free_entry, "slow-path");
}

// Removes an object.
//
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
//
// If the entry is not at the top, we just add it to the free entry list.
// If the entry is at the top, we pop it from the top and check if there are
// free entries under it to remove in order to reduce the size of the table.
//
// Returns "false" if nothing was removed.
bool LocalReferenceTable::Remove(IndirectRef iref) {
  if (kDebugLRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state_.top_index
              << " top_index=" << segment_state_.top_index;
  }

  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(iref);
  if (UNLIKELY(kind != kLocal)) {
    Thread* self = Thread::Current();
    if (kind == kJniTransition) {
      if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
        // Transition references count as local but they cannot be deleted.
        // TODO: They could actually be cleared on the stack, except for the `jclass`
        // reference for static methods that points to the method's declaring class.
        JNIEnvExt* env = self->GetJniEnv();
        DCHECK(env != nullptr);
        if (env->IsCheckJniEnabled()) {
          const char* msg = kDumpStackOnNonLocalReference
              ? "Attempt to remove non-JNI local reference, dumping thread"
              : "Attempt to remove non-JNI local reference";
          LOG(WARNING) << msg;
          if (kDumpStackOnNonLocalReference) {
            self->Dump(LOG_STREAM(WARNING));
          }
        }
        return true;
      }
    }
    if (kDumpStackOnNonLocalReference && IsCheckJniEnabled()) {
      // Log the error message and stack. Repeat the message as FATAL later.
      LOG(ERROR) << "Attempt to delete " << kind
                 << " reference as local JNI reference, dumping stack";
      self->Dump(LOG_STREAM(ERROR));
    }
    LOG(IsCheckJniEnabled() ? ERROR : FATAL)
        << "Attempt to delete " << kind << " reference as local JNI reference";
    return false;
  }

  DCHECK_LE(previous_state_.top_index, segment_state_.top_index);
  DCHECK(max_entries_ == kSmallLrtEntries ? small_table_ != nullptr : !tables_.empty());
  DCheckValidReference(iref);

  LrtEntry* entry = ToLrtEntry(iref);
  uint32_t entry_index = GetReferenceEntryIndex(iref);
  uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state_.top_index;

  if (entry_index < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << entry_index
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }

  if (UNLIKELY(IsCheckJniEnabled())) {
    // Ignore invalid references. CheckJNI should have aborted before passing this reference
    // to `LocalReferenceTable::Remove()` but gtests intercept the abort and proceed anyway.
    std::string error_msg;
    if (!IsValidReference(iref, &error_msg)) {
      LOG(WARNING) << "Attempt to remove invalid reference: " << error_msg;
      return false;
    }
  }
  DCHECK_LT(entry_index, top_index);

  // Workaround for double `DeleteLocalRef` bug. b/298297411
  if (entry->IsFree()) {
    // In debug build or with CheckJNI enabled, we would have detected this above.
    LOG(ERROR) << "App error: `DeleteLocalRef()` on already deleted local ref. b/298297411";
    return false;
  }

  // Prune the free entry list if a segment with holes was popped before the `Remove()` call.
  uint32_t first_free_index = GetFirstFreeIndex();
  if (first_free_index != kFreeListEnd && first_free_index >= top_index) {
    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
  }

  // Check if we're removing the top entry (created with any CheckJNI setting).
  bool is_top_entry = false;
  uint32_t prune_end = entry_index;
  if (GetCheckJniSerialNumberEntry(entry)->IsSerialNumber()) {
    LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(entry);
    uint32_t serial_number = dchecked_integral_cast<uint32_t>(entry - serial_number_entry);
    DCHECK_EQ(serial_number, serial_number_entry->GetSerialNumber());
    prune_end = entry_index - serial_number;
    is_top_entry = (prune_end == top_index - kCheckJniEntriesPerReference);
  } else {
    is_top_entry = (entry_index == top_index - 1u);
  }
  if (is_top_entry) {
    // Top-most entry. Scan up and consume holes created with the current CheckJNI setting.
    constexpr uint32_t kDeadLocalValue = 0xdead10c0;
    entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));

    // TODO: Maybe we should not prune free entries from the top of the segment
    // because it has quadratic worst-case complexity. We could still prune while
    // the first free list entry is at the top.
    uint32_t prune_start = prune_end;
    size_t prune_count;
    auto find_prune_range = [&](size_t chunk_size, auto is_prev_entry_free) {
      while (prune_start > bottom_index && is_prev_entry_free(prune_start)) {
        prune_start -= chunk_size;
      }
      prune_count = (prune_end - prune_start) / chunk_size;
    };

    if (UNLIKELY(IsCheckJniEnabled())) {
      auto is_prev_entry_free = [&](size_t index) {
        DCHECK_ALIGNED(index, kCheckJniEntriesPerReference);
        LrtEntry* serial_number_entry = GetEntry(index - kCheckJniEntriesPerReference);
        DCHECK_ALIGNED(serial_number_entry, kCheckJniEntriesPerReference * sizeof(LrtEntry));
        if (!serial_number_entry->IsSerialNumber()) {
          return false;
        }
        uint32_t serial_number = serial_number_entry->GetSerialNumber();
        DCHECK(IsValidSerialNumber(serial_number));
        LrtEntry* entry = serial_number_entry + serial_number;
        DCHECK_EQ(entry, GetEntry(prune_start - kCheckJniEntriesPerReference + serial_number));
        return entry->IsFree();
      };
      find_prune_range(kCheckJniEntriesPerReference, is_prev_entry_free);
    } else {
      auto is_prev_entry_free = [&](size_t index) {
        LrtEntry* entry = GetEntry(index - 1u);
        return entry->IsFree() && !GetCheckJniSerialNumberEntry(entry)->IsSerialNumber();
      };
      find_prune_range(1u, is_prev_entry_free);
    }

    if (prune_count != 0u) {
      // Remove pruned entries from the free list.
      size_t remaining = prune_count;
      uint32_t free_index = GetFirstFreeIndex();
      while (remaining != 0u && free_index >= prune_start) {
        DCHECK_NE(free_index, kFreeListEnd);
        LrtEntry* pruned_entry = GetEntry(free_index);
        free_index = pruned_entry->GetNextFree();
        pruned_entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
        --remaining;
      }
      free_entries_list_ = FirstFreeField::Update(free_index, free_entries_list_);
      while (remaining != 0u) {
        DCHECK_NE(free_index, kFreeListEnd);
        DCHECK_LT(free_index, prune_start);
        DCHECK_GE(free_index, bottom_index);
        LrtEntry* free_entry = GetEntry(free_index);
        while (free_entry->GetNextFree() < prune_start) {
          free_index = free_entry->GetNextFree();
          DCHECK_GE(free_index, bottom_index);
          free_entry = GetEntry(free_index);
        }
        LrtEntry* pruned_entry = GetEntry(free_entry->GetNextFree());
        free_entry->SetNextFree(pruned_entry->GetNextFree());
        pruned_entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
        --remaining;
      }
      DCHECK(free_index == kFreeListEnd || free_index < prune_start)
          << "free_index=" << free_index << ", prune_start=" << prune_start;
    }
    segment_state_.top_index = prune_start;
    if (kDebugLRT) {
      LOG(INFO) << "+++ removed last entry, pruned " << prune_count
                << ", new top=" << segment_state_.top_index;
    }
  } else {
    // Not the top-most entry. This creates a hole.
    entry->SetNextFree(GetFirstFreeIndex());
    free_entries_list_ = FirstFreeField::Update(entry_index, free_entries_list_);
    if (kDebugLRT) {
      LOG(INFO) << "+++ removed entry and left hole at " << entry_index;
    }
  }

  return true;
}

void LocalReferenceTable::AssertEmpty() {
  CHECK_EQ(Capacity(), 0u) << "Internal Error: non-empty local reference table.";
}

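// Release the physical pages backing unused entries above the current segment top via
// `madvise(MADV_DONTNEED)`. Only the directly mem-mapped tables are touched; tables
// carved out of the `SmallLrtAllocator`'s shared pages are left alone. Popped free
// entries are pruned first so that no free-list links point into released memory.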
void LocalReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t num_mem_maps = table_mem_maps_.size();
  if (num_mem_maps == 0u) {
    // Only small tables; nothing to do here. (Do not unnecessarily prune popped free entries.)
    return;
  }
  DCHECK_EQ(tables_.size(), num_mem_maps + MaxSmallTables());
  const size_t top_index = segment_state_.top_index;
  // Prune popped free entries before potentially losing their memory.
  if (UNLIKELY(GetFirstFreeIndex() != kFreeListEnd) &&
      UNLIKELY(GetFirstFreeIndex() >= segment_state_.top_index)) {
    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
  }
  // Small tables can hold as many entries as the next table.
  const size_t small_tables_capacity = GetTableSize(MaxSmallTables());
  size_t mem_map_index = 0u;
  if (top_index > small_tables_capacity) {
    const size_t table_size = TruncToPowerOfTwo(top_index);
    const size_t table_index = NumTablesForSize(table_size);
    const size_t start_index = top_index - table_size;
    mem_map_index = table_index - MaxSmallTables();
    if (start_index != 0u) {
      ++mem_map_index;
      LrtEntry* table = tables_[table_index];
      uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table[start_index]), gPageSize);
      uint8_t* release_end = reinterpret_cast<uint8_t*>(&table[table_size]);
      DCHECK_GE(reinterpret_cast<uintptr_t>(release_end),
                reinterpret_cast<uintptr_t>(release_start));
      DCHECK_ALIGNED_PARAM(release_end, gPageSize);
      DCHECK_ALIGNED_PARAM(release_end - release_start, gPageSize);
      if (release_start != release_end) {
        madvise(release_start, release_end - release_start, MADV_DONTNEED);
      }
    }
  }
  for (MemMap& mem_map : ArrayRef<MemMap>(table_mem_maps_).SubArray(mem_map_index)) {
    madvise(mem_map.Begin(), mem_map.Size(), MADV_DONTNEED);
  }
}

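// Visit all live references up to the current segment top. Serial number entries and
// free entries are skipped; for a CheckJNI entry group the live reference is found at
// the offset given by the group's serial number.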
template <typename Visitor>
void LocalReferenceTable::VisitRootsInternal(Visitor&& visitor) const {
  auto visit_table = [&](LrtEntry* table, size_t count) REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i != count; ) {
      LrtEntry* entry;
      if (i % kCheckJniEntriesPerReference == 0u && table[i].IsSerialNumber()) {
        entry = &table[i + table[i].GetSerialNumber()];
        i += kCheckJniEntriesPerReference;
        DCHECK_LE(i, count);
      } else {
        entry = &table[i];
        i += 1u;
      }
      DCHECK(!entry->IsSerialNumber());
      if (!entry->IsFree()) {
        GcRoot<mirror::Object>* root = entry->GetRootAddress();
        DCHECK(!root->IsNull());
        visitor(root);
      }
    }
  };

  if (small_table_ != nullptr) {
    visit_table(small_table_, segment_state_.top_index);
  } else {
    uint32_t remaining = segment_state_.top_index;
    size_t table_index = 0u;
    while (remaining != 0u) {
      size_t count = std::min<size_t>(remaining, GetTableSize(table_index));
      visit_table(tables_[table_index], count);
      ++table_index;
      remaining -= count;
    }
  }
}

void LocalReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  VisitRootsInternal([&](GcRoot<mirror::Object>* root) REQUIRES_SHARED(Locks::mutator_lock_) {
    root_visitor.VisitRoot(*root);
  });
}

void LocalReferenceTable::Dump(std::ostream& os) const {
  os << kLocal << " table dump:\n";
  ReferenceTable::Table entries;
  VisitRootsInternal([&](GcRoot<mirror::Object>* root) REQUIRES_SHARED(Locks::mutator_lock_) {
    entries.push_back(*root);
  });
  ReferenceTable::Dump(os, entries);
}

bool LocalReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  // TODO: Pass `previous_state` so that we can check holes.
  DCHECK_GE(free_capacity, static_cast<size_t>(1));
  size_t top_index = segment_state_.top_index;
  DCHECK_LE(top_index, max_entries_);

  if (IsCheckJniEnabled()) {
    // High values lead to the maximum size check failing below.
    if (free_capacity >= std::numeric_limits<size_t>::max() / kCheckJniEntriesPerReference) {
      free_capacity = std::numeric_limits<size_t>::max();
    } else {
      free_capacity *= kCheckJniEntriesPerReference;
    }
  }

  // TODO: Include holes from the current segment in the calculation.
  if (free_capacity <= max_entries_ - top_index) {
    return true;
  }

  if (free_capacity > kMaxTableSize - top_index) {
    *error_msg = android::base::StringPrintf(
        "Requested size exceeds maximum: %zu > %zu (%zu used)",
        free_capacity,
        kMaxTableSize - top_index,
        top_index);
    return false;
  }

  // Try to increase the table size.
  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<LocalReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t LocalReferenceTable::FreeCapacity() const {
  // TODO: Include holes in the current segment.
  if (IsCheckJniEnabled()) {
    DCHECK_ALIGNED(max_entries_, kCheckJniEntriesPerReference);
    // The `segment_state_.top_index` is not necessarily aligned; we round down.
    return (max_entries_ - segment_state_.top_index) / kCheckJniEntriesPerReference;
  } else {
    return max_entries_ - segment_state_.top_index;
  }
}

}  // namespace jni
}  // namespace art