/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_id_manager.h"

#include <algorithm>
#include <cstdint>
#include <type_traits>

#include "android-base/macros.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/globals.h"
#include "base/locks.h"
#include "base/mutex.h"
#include "base/pointer_size.h"
#include "class_root-inl.h"
#include "gc/allocation_listener.h"
#include "gc/heap.h"
#include "jni/jni_internal.h"
#include "jni_id_type.h"
#include "mirror/array-inl.h"
#include "mirror/array.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_ext-inl.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
#include "reflective_handle_scope-inl.h"
#include "reflective_handle_scope.h"
#include "reflective_value_visitor.h"
#include "thread-inl.h"
#include "thread.h"

namespace art HIDDEN {
namespace jni {

constexpr bool kTraceIds = false;

// TODO This whole thing could be done lock & wait free (since we never remove anything from the
// ids list). It's not clear this would be worthwhile though.

namespace {

static constexpr size_t IdToIndex(uintptr_t id) {
  return id >> 1;
}

static constexpr uintptr_t IndexToId(size_t index) {
  return (index << 1) + 1;
}
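
// For example, IndexToId(0) == 1 and IndexToId(5) == 11, while IdToIndex(11) == 5. The "+ 1"
// sets the low bit, which marks the value as an index rather than a raw ArtField*/ArtMethod*
// pointer (pointers are always at least 2-byte aligned, so their low bit is 0).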

static bool CanUseIdArrays(ArtMethod* t) {
  // We cannot use ID arrays from the ClassExt object for obsolete and default conflicting
  // methods. The ID arrays hold an ID corresponding to the methods in the methods list. Obsolete
  // methods aren't in the method list. For default conflicting methods it is difficult to find
  // the class that contains the copied method, so we omit using ID arrays. For default
  // conflicting methods we also cannot use the canonical method, because canonicalizing would
  // return a method from one of the interface classes. If we used that method ID and invoked it
  // via the CallNonvirtual JNI interface, it wouldn't throw the expected ICCE.
  return !(t->IsObsolete() || t->IsDefaultConflicting());
}

template <typename ArtType>
ObjPtr<mirror::PointerArray> GetIds(ObjPtr<mirror::Class> k, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Object> ret;
  if constexpr (std::is_same_v<ArtType, ArtField>) {
    ret = t->IsStatic() ? k->GetStaticFieldIds() : k->GetInstanceFieldIds();
  } else {
    ret = CanUseIdArrays(t) ? k->GetMethodIds() : nullptr;
  }
  DCHECK(ret.IsNull() || ret->IsArrayInstance()) << "Should have bailed out early!";
  if (kIsDebugBuild && !ret.IsNull()) {
    if (kRuntimePointerSize == PointerSize::k32) {
      CHECK(ret->IsIntArray());
    } else {
      CHECK(ret->IsLongArray());
    }
  }
  return down_cast<mirror::PointerArray*>(ret.Ptr());
}

template <typename ArtType>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, [[maybe_unused]] ArtMethod* t) {
  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
  if (ext.IsNull()) {
    return true;
  }
  ObjPtr<mirror::Object> arr = ext->GetJMethodIDs();
  return arr.IsNull() || !arr->IsArrayInstance();
}

template <>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtField* t) {
  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
  if (ext.IsNull()) {
    return true;
  }
  ObjPtr<mirror::Object> arr = t->IsStatic() ? ext->GetStaticJFieldIDs()
                                             : ext->GetInstanceJFieldIDs();
  return arr.IsNull() || !arr->IsArrayInstance();
}

// Forces the appropriate id array to be present if possible. Returns true if allocation was
// attempted but failed.
template <typename ArtType>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtField* field) {
  ScopedExceptionStorage ses(self);
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_k(hs.NewHandle(k));
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    return false;
  } else {
    // NB This modifies the class to allocate the ClassExt and the ids array.
    field->IsStatic() ? mirror::Class::EnsureStaticFieldIds(h_k)
                      : mirror::Class::EnsureInstanceFieldIds(h_k);
  }
  if (self->IsExceptionPending()) {
    self->AssertPendingOOMException();
    ses.SuppressOldException("Failed to allocate maps for jfieldIDs. ");
    return true;
  }
  return false;
}

template <>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtMethod* method) {
  if (!CanUseIdArrays(method)) {
    if (kTraceIds) {
      LOG(INFO) << "jmethodID for Obsolete / Default conflicting method "
                << method->PrettyMethod() << " requested!";
    }
    // No ids array for obsolete / default conflicting methods. Just do a linear scan.
    return false;
  }
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_k(hs.NewHandle(k));
  if (Locks::mutator_lock_->IsExclusiveHeld(self) || !Locks::mutator_lock_->IsSharedHeld(self)) {
    return false;
  } else {
    // NB This modifies the class to allocate the ClassExt and the ids array.
    mirror::Class::EnsureMethodIds(h_k);
  }
  if (self->IsExceptionPending()) {
    self->AssertPendingOOMException();
    return true;
  }
  return false;
}

template <typename ArtType>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtType* t, PointerSize pointer_size)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtField* f, [[maybe_unused]] PointerSize ptr_size) {
  return f->IsStatic() ? k->GetStaticFieldIdOffset(f) : k->GetInstanceFieldIdOffset(f);
}

template <>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtMethod* method, PointerSize pointer_size) {
  return CanUseIdArrays(method) ? k->GetMethodIdOffset(method, pointer_size) : -1;
}

// Calls the relevant PrettyMethod/PrettyField on the input.
template <typename ArtType>
std::string PrettyGeneric(ArtType t) REQUIRES_SHARED(Locks::mutator_lock_);

template <>
std::string PrettyGeneric(ArtMethod* f) {
  return f->PrettyMethod();
}

template <>
std::string PrettyGeneric(ReflectiveHandle<ArtMethod> f) {
  return f->PrettyMethod();
}

template <>
std::string PrettyGeneric(ArtField* f) {
  return f->PrettyField();
}

template <>
std::string PrettyGeneric(ReflectiveHandle<ArtField> f) {
  return f->PrettyField();
}

// Checks if the field or method can use the ID array from class extension.
template <typename ArtType>
bool CanUseIdArrays(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool CanUseIdArrays([[maybe_unused]] ReflectiveHandle<ArtField> t) {
  return true;
}

template <>
bool CanUseIdArrays(ReflectiveHandle<ArtMethod> t) {
  return CanUseIdArrays(t.Get());
}

// Get the canonical (non-copied) version of the field or method. Only relevant for methods.
template <typename ArtType>
ArtType* Canonicalize(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);

template <>
ArtField* Canonicalize(ReflectiveHandle<ArtField> t) {
  return t.Get();
}

template <>
ArtMethod* Canonicalize(ReflectiveHandle<ArtMethod> t) {
  if (UNLIKELY(t->IsCopied())) {
    return t->GetCanonicalMethod();
  }
  return t.Get();
}

}  // namespace

// We increment the id by 2 each time to allow us to use the LSB as a flag that the ID is an index
// and not a pointer. This gives us 2**31 unique ids that can be addressed on 32-bit ART, which
// should be more than enough.
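// For instance, successive jfieldIDs are handed out as 1, 3, 5, ... (a sketch, assuming the
// counters start at 1 as IndexToId(0) implies): GetNextId<ArtField> returns next_field_id_ and
// then bumps it by 2, so every id stays odd and maps back to map indices 0, 1, 2, ...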
template <>
uintptr_t JniIdManager::GetNextId<ArtField>(JniIdType type) {
  DCHECK_EQ(type, JniIdType::kIndices);
  uintptr_t res = next_field_id_;
  next_field_id_ += 2;
  CHECK_GT(next_field_id_, res) << "jfieldID Overflow";
  return res;
}

template <>
uintptr_t JniIdManager::GetNextId<ArtMethod>(JniIdType type) {
  DCHECK_EQ(type, JniIdType::kIndices);
  uintptr_t res = next_method_id_;
  next_method_id_ += 2;
  CHECK_GT(next_method_id_, res) << "jmethodID Overflow";
  return res;
}

template <>
std::vector<ArtField*>& JniIdManager::GetGenericMap<ArtField>() {
  return field_id_map_;
}

template <>
std::vector<ArtMethod*>& JniIdManager::GetGenericMap<ArtMethod>() {
  return method_id_map_;
}

template <>
size_t JniIdManager::GetLinearSearchStartId<ArtField>(
    [[maybe_unused]] ReflectiveHandle<ArtField> t) {
  return deferred_allocation_field_id_start_;
}

template <>
size_t JniIdManager::GetLinearSearchStartId<ArtMethod>(ReflectiveHandle<ArtMethod> m) {
  if (CanUseIdArrays(m)) {
    // If we are searching because allocation was deferred by a defer-allocation scope, then we
    // should only look from deferred_allocation_method_id_start_. Once we exit the deferred scope
    // all these method ids will be updated to the id arrays in the respective ClassExt objects.
    return deferred_allocation_method_id_start_;
  } else {
    // If we cannot use ID arrays, then the method can be anywhere in the list.
    return 1;
  }
}

// TODO We need to fix races in here with visitors.
template <typename ArtType>
uintptr_t JniIdManager::EncodeGenericId(ReflectiveHandle<ArtType> t) {
  static_assert(std::is_same_v<ArtType, ArtField> || std::is_same_v<ArtType, ArtMethod>,
                "Expected ArtField or ArtMethod");
  Runtime* runtime = Runtime::Current();
  JniIdType id_type = runtime->GetJniIdType();
  if (id_type == JniIdType::kPointer || t == nullptr) {
    return reinterpret_cast<uintptr_t>(t.Get());
  }
  Thread* self = Thread::Current();
  ScopedExceptionStorage ses(self);
  DCHECK(!t->GetDeclaringClass().IsNull()) << "Null declaring class " << PrettyGeneric(t);
  size_t off = -1;
  bool allocation_failure = false;
  // When we cannot use ID arrays, we just fall back to scanning the list to obtain the ID. These
  // cases are rare, so this shouldn't be a performance problem. See CanUseIdArrays for more
  // information.
  if (CanUseIdArrays(t)) {
    off = GetIdOffset(t->GetDeclaringClass(), Canonicalize(t), kRuntimePointerSize);
    // Here is the earliest point we can suspend.
    allocation_failure = EnsureIdsArray(self, t->GetDeclaringClass(), t.Get());
  }
  if (allocation_failure) {
    self->AssertPendingOOMException();
    ses.SuppressOldException("OOM exception while trying to allocate JNI ids.");
    return 0u;
  } else if (ShouldReturnPointer(t->GetDeclaringClass(), t.Get())) {
    // TODO(mythria): Check why we return a pointer here instead of falling back
    // to the slow path of finding the ID by looping through the ID -> method
    // map. This seems incorrect. For example, if we are in a
    // ScopedEnableSuspendAllJniIdQueries scope, we don't allocate ID arrays. We
    // would then incorrectly return a pointer here.
    return reinterpret_cast<uintptr_t>(t.Get());
  }
  ObjPtr<mirror::Class> klass = t->GetDeclaringClass();
  ObjPtr<mirror::PointerArray> ids(GetIds(klass, t.Get()));
  uintptr_t cur_id = 0;
  if (!ids.IsNull()) {
    DCHECK_GT(ids->GetLength(), static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
    DCHECK_LE(0, static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
  }
  if (cur_id != 0) {
    return cur_id;
  }
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  ScopedAssertNoThreadSuspension sants("EncodeJniId critical section.");
  // Check the ids array for a racing id.
  constexpr std::pair<size_t, size_t> counts {
      std::is_same_v<ArtType, ArtField> ? 1 : 0,
      std::is_same_v<ArtType, ArtField> ? 0 : 1,
  };
  StackReflectiveHandleScope<counts.first, counts.second> hs(self);
  t = hs.NewHandle(Canonicalize(t));
  if (!ids.IsNull()) {
    // It's possible we got suspended and the class was structurally redefined during
    // EnsureIdsArray. We need to get the information again.
    ids = GetIds(klass, t.Get());
    off = GetIdOffset(klass, Canonicalize(t), kRuntimePointerSize);
    CHECK(!ids.IsNull());
    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
    if (cur_id != 0) {
      // We were racing some other thread and lost.
      return cur_id;
    }
  } else {
    // We cannot allocate anything here or don't have an ids array (we might be an obsolete
    // method).
    DCHECK(!CanUseIdArrays(t) || deferred_allocation_refcount_ > 0u)
        << "deferred_allocation_refcount_: " << deferred_allocation_refcount_
        << " t: " << PrettyGeneric(t);
    // Check to see if we raced and lost to another thread.
    const std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
    bool found = false;
    // Simple count-while.
    size_t search_start_index = IdToIndex(GetLinearSearchStartId(t));
    size_t index = std::count_if(vec.cbegin() + search_start_index,
                                 vec.cend(),
                                 [&found, &self, t](const ArtType* candidate) {
                                   Locks::mutator_lock_->AssertSharedHeld(self);
                                   found = found || candidate == t.Get();
                                   return !found;
                                 }) +
                   search_start_index;
    if (found) {
      // We were either racing some other thread and lost or this thread was asked to encode the
      // same method multiple times while holding the mutator lock.
      DCHECK_EQ(vec[index], t.Get())
          << "Expected: " << PrettyGeneric(vec[index]) << " got " << PrettyGeneric(t)
          << " at index " << index << " (id: " << IndexToId(index) << ").";
      return IndexToId(index);
    }
  }
  cur_id = GetNextId<ArtType>(id_type);
  DCHECK_EQ(cur_id % 2, 1u);
  size_t cur_index = IdToIndex(cur_id);
  std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
  vec.reserve(cur_index + 1);
  vec.resize(std::max(vec.size(), cur_index + 1), nullptr);
  vec[cur_index] = t.Get();
  if (ids.IsNull()) {
    if (kIsDebugBuild && CanUseIdArrays(t)) {
      CHECK_NE(deferred_allocation_refcount_, 0u)
          << "Failed to allocate ids array despite not being forbidden from doing so!";
      Locks::mutator_lock_->AssertExclusiveHeld(self);
    }
  } else {
    ids->SetElementPtrSize(off, reinterpret_cast<void*>(cur_id), kRuntimePointerSize);
  }
  return cur_id;
}

jfieldID JniIdManager::EncodeFieldId(ArtField* field) {
  StackArtFieldHandleScope<1> rhs(Thread::Current());
  return EncodeFieldId(rhs.NewHandle(field));
}

jfieldID JniIdManager::EncodeFieldId(ReflectiveHandle<ArtField> field) {
  auto* res = reinterpret_cast<jfieldID>(EncodeGenericId(field));
  if (kTraceIds && field != nullptr) {
    LOG(INFO) << "Returning " << res << " for field " << field->PrettyField();
  }
  return res;
}

jmethodID JniIdManager::EncodeMethodId(ArtMethod* method) {
  StackArtMethodHandleScope<1> rhs(Thread::Current());
  return EncodeMethodId(rhs.NewHandle(method));
}

jmethodID JniIdManager::EncodeMethodId(ReflectiveHandle<ArtMethod> method) {
  auto* res = reinterpret_cast<jmethodID>(EncodeGenericId(method));
  if (kTraceIds && method != nullptr) {
    LOG(INFO) << "Returning " << res << " for method " << method->PrettyMethod();
  }
  return res;
}
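
// A round-trip sketch (hypothetical usage; assumes JniIdType::kIndices mode and a non-copied
// method, since EncodeGenericId canonicalizes copied methods before storing them):
//
//   JniIdManager* ids = Runtime::Current()->GetJniIdManager();
//   jmethodID mid = ids->EncodeMethodId(method);
//   CHECK_EQ(ids->DecodeMethodId(mid), method);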

void JniIdManager::VisitRoots(RootVisitor* visitor) {
  pointer_marker_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
}

void JniIdManager::Init(Thread* self) {
  // When compiling we don't want to have anything to do with any of this, which is fine since JNI
  // ids won't be created during AOT compilation. This also means we don't need to do any
  // complicated stuff with the image-writer.
  if (!Runtime::Current()->IsAotCompiler()) {
    // Allocate the marker.
    StackHandleScope<3> hs(self);
    Handle<mirror::Object> marker_obj(
        hs.NewHandle(GetClassRoot<mirror::Object>()->AllocObject(self)));
    CHECK(!marker_obj.IsNull());
    pointer_marker_ = GcRoot<mirror::Object>(marker_obj.Get());
    // Manually mark class-ext as having all pointer-ids to avoid any annoying loops.
    Handle<mirror::Class> class_ext_class(hs.NewHandle(GetClassRoot<mirror::ClassExt>()));
    mirror::Class::EnsureExtDataPresent(class_ext_class, self);
    Handle<mirror::ClassExt> class_ext_ext(hs.NewHandle(class_ext_class->GetExtData()));
    class_ext_ext->SetIdsArraysForClassExtExtData(marker_obj.Get());
  }
}

void JniIdManager::VisitReflectiveTargets(ReflectiveValueVisitor* rvv) {
  art::WriterMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
  for (auto it = field_id_map_.begin(); it != field_id_map_.end(); ++it) {
    ArtField* old_field = *it;
    uintptr_t id = IndexToId(std::distance(field_id_map_.begin(), it));
    ArtField* new_field =
        rvv->VisitField(old_field, JniIdReflectiveSourceInfo(reinterpret_cast<jfieldID>(id)));
    if (old_field != new_field) {
      *it = new_field;
      ObjPtr<mirror::Class> old_class(old_field->GetDeclaringClass());
      ObjPtr<mirror::Class> new_class(new_field->GetDeclaringClass());
      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
      if (!old_ext_data.IsNull()) {
        CHECK(!old_ext_data->HasInstanceFieldPointerIdMarker() &&
              !old_ext_data->HasStaticFieldPointerIdMarker())
            << old_class->PrettyClass();
        // Clear the old field mapping.
        if (old_field->IsStatic()) {
          size_t old_off = ArraySlice<ArtField>(old_class->GetSFieldsPtr()).OffsetOf(old_field);
          ObjPtr<mirror::PointerArray> old_statics(old_ext_data->GetStaticJFieldIDsPointerArray());
          if (!old_statics.IsNull()) {
            old_statics->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
          }
        } else {
          size_t old_off = ArraySlice<ArtField>(old_class->GetIFieldsPtr()).OffsetOf(old_field);
          ObjPtr<mirror::PointerArray> old_instances(
              old_ext_data->GetInstanceJFieldIDsPointerArray());
          if (!old_instances.IsNull()) {
            old_instances->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
          }
        }
      }
      if (!new_ext_data.IsNull()) {
        CHECK(!new_ext_data->HasInstanceFieldPointerIdMarker() &&
              !new_ext_data->HasStaticFieldPointerIdMarker())
            << new_class->PrettyClass();
        // Set the new field mapping.
        if (new_field->IsStatic()) {
          size_t new_off = ArraySlice<ArtField>(new_class->GetSFieldsPtr()).OffsetOf(new_field);
          ObjPtr<mirror::PointerArray> new_statics(new_ext_data->GetStaticJFieldIDsPointerArray());
          if (!new_statics.IsNull()) {
            new_statics->SetElementPtrSize(new_off, id, kRuntimePointerSize);
          }
        } else {
          size_t new_off = ArraySlice<ArtField>(new_class->GetIFieldsPtr()).OffsetOf(new_field);
          ObjPtr<mirror::PointerArray> new_instances(
              new_ext_data->GetInstanceJFieldIDsPointerArray());
          if (!new_instances.IsNull()) {
            new_instances->SetElementPtrSize(new_off, id, kRuntimePointerSize);
          }
        }
      }
    }
  }
  for (auto it = method_id_map_.begin(); it != method_id_map_.end(); ++it) {
    ArtMethod* old_method = *it;
    uintptr_t id = IndexToId(std::distance(method_id_map_.begin(), it));
    ArtMethod* new_method =
        rvv->VisitMethod(old_method, JniIdReflectiveSourceInfo(reinterpret_cast<jmethodID>(id)));
    if (old_method != new_method) {
      *it = new_method;
      ObjPtr<mirror::Class> old_class(old_method->GetDeclaringClass());
      ObjPtr<mirror::Class> new_class(new_method->GetDeclaringClass());
      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
      if (!old_ext_data.IsNull()) {
        CHECK(!old_ext_data->HasMethodPointerIdMarker()) << old_class->PrettyClass();
        // Clear the old method mapping.
        size_t old_off = ArraySlice<ArtMethod>(old_class->GetMethodsPtr()).OffsetOf(old_method);
        ObjPtr<mirror::PointerArray> old_methods(old_ext_data->GetJMethodIDsPointerArray());
        if (!old_methods.IsNull()) {
          old_methods->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
        }
      }
      if (!new_ext_data.IsNull()) {
        CHECK(!new_ext_data->HasMethodPointerIdMarker()) << new_class->PrettyClass();
        // Set the new method mapping.
        size_t new_off = ArraySlice<ArtMethod>(new_class->GetMethodsPtr()).OffsetOf(new_method);
        ObjPtr<mirror::PointerArray> new_methods(new_ext_data->GetJMethodIDsPointerArray());
        if (!new_methods.IsNull()) {
          new_methods->SetElementPtrSize(new_off, id, kRuntimePointerSize);
        }
      }
    }
  }
}

template <typename ArtType>
ArtType* JniIdManager::DecodeGenericId(uintptr_t t) {
  if (Runtime::Current()->GetJniIdType() == JniIdType::kIndices && (t % 2) == 1) {
    ReaderMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
    size_t index = IdToIndex(t);
    DCHECK_GT(GetGenericMap<ArtType>().size(), index);
    return GetGenericMap<ArtType>().at(index);
  } else {
    DCHECK_EQ((t % 2), 0u) << "id: " << t;
    return reinterpret_cast<ArtType*>(t);
  }
}
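
// For example, under JniIdType::kIndices an odd value such as 43 (0b101011) is an index-based
// id: IdToIndex(43) == 21, so the entry at slot 21 of the relevant map is returned. Even values
// are treated as raw ArtField*/ArtMethod* pointers and cast back directly.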

ArtMethod* JniIdManager::DecodeMethodId(jmethodID method) {
  return DecodeGenericId<ArtMethod>(reinterpret_cast<uintptr_t>(method));
}

ArtField* JniIdManager::DecodeFieldId(jfieldID field) {
  return DecodeGenericId<ArtField>(reinterpret_cast<uintptr_t>(field));
}

ObjPtr<mirror::Object> JniIdManager::GetPointerMarker() {
  return pointer_marker_.Read();
}

// This whole defer system is an annoying requirement to allow us to generate IDs during heap-walks
// such as those required for instrumentation tooling.
//
// The defer system works with the normal id-assignment routine to ensure that all the class-ext
// data structures are eventually created and filled in. Basically how it works is the
// id-assignment function will check to see if it holds the mutator-lock exclusively. If it does
// not, it will try to allocate the class-ext data structures normally and fail if it is unable to
// do so. In the case where the mutator-lock is being held exclusively, no attempt to allocate will
// be made and the thread will CHECK that allocations are being deferred (or that the method is
// obsolete, in which case there is no class-ext to store the method->id map in).
//
// Once the thread is done holding the exclusive mutator-lock it will go back and fill in the
// class-ext data of all the methods that were added. We do this without the exclusive mutator-lock
// on a copy of the maps before we decrement the deferred refcount. This ensures that any other
// threads running at the same time know they need to perform a linear scan of the id-map. Since we
// don't have the mutator-lock anymore other threads can allocate the class-ext data, meaning our
// copy is fine. The only way additional methods could end up on the id-maps after our copy without
// having class-ext data is if another thread picked up the exclusive mutator-lock and added another
// defer, in which case that thread would fix up the remaining ids. In this way we maintain eventual
// consistency between the class-ext method/field->id maps and the JniIdManager id->method/field
// maps.
//
// TODO It is possible for another thread to gain the mutator-lock and allocate new ids without
// calling StartDefer. This is basically a race that we should try to catch, but doing so is
// rather difficult and, since this defer system is only used in very rare circumstances, unlikely
// to be worth the trouble.
void JniIdManager::StartDefer() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  if (deferred_allocation_refcount_++ == 0) {
    deferred_allocation_field_id_start_ = next_field_id_;
    deferred_allocation_method_id_start_ = next_method_id_;
  }
}

class JniIdDeferStackReflectiveScope : public BaseReflectiveHandleScope {
 public:
  JniIdDeferStackReflectiveScope() REQUIRES_SHARED(art::Locks::mutator_lock_)
      : BaseReflectiveHandleScope(), methods_(), fields_() {
    PushScope(Thread::Current());
  }

  void Initialize(const std::vector<ArtMethod*>& methods, const std::vector<ArtField*>& fields)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_) {
    methods_ = methods;
    fields_ = fields;
  }

  ~JniIdDeferStackReflectiveScope() REQUIRES_SHARED(Locks::mutator_lock_) {
    PopScope();
  }

  void VisitTargets(ReflectiveValueVisitor* visitor) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (auto it = methods_.begin(); it != methods_.end(); ++it) {
      if (*it == nullptr) {
        continue;
      }
      *it = visitor->VisitMethod(*it, ReflectiveHandleScopeSourceInfo(this));
    }
    for (auto it = fields_.begin(); it != fields_.end(); ++it) {
      if (*it == nullptr) {
        continue;
      }
      *it = visitor->VisitField(*it, ReflectiveHandleScopeSourceInfo(this));
    }
  }

  ArtField** GetFieldPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &fields_[idx];
  }

  ArtMethod** GetMethodPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &methods_[idx];
  }

  size_t NumFields() const {
    return fields_.size();
  }

  size_t NumMethods() const {
    return methods_.size();
  }

 private:
  std::vector<ArtMethod*> methods_;
  std::vector<ArtField*> fields_;
};

void JniIdManager::EndDefer() {
  // Fixup the method->id map.
  Thread* self = Thread::Current();
  auto set_id = [&](auto** t, uintptr_t id) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (t == nullptr) {
      return;
    }
    bool alloc_failure = EnsureIdsArray(self, (*t)->GetDeclaringClass(), *t);
    ObjPtr<mirror::Class> klass((*t)->GetDeclaringClass());
    size_t off = GetIdOffset(klass, (*t), kRuntimePointerSize);
    ObjPtr<mirror::PointerArray> ids = GetIds(klass, (*t));
    CHECK(!alloc_failure) << "Could not allocate jni ids array!";
    if (ids.IsNull()) {
      return;
    }
    if (kIsDebugBuild) {
      uintptr_t old_id = ids->GetElementPtrSize<uintptr_t, kRuntimePointerSize>(off);
      if (old_id != 0) {
        DCHECK_EQ(old_id, id);
      }
    }
    ids->SetElementPtrSize(off, reinterpret_cast<void*>(id), kRuntimePointerSize);
  };
  // To ensure eventual consistency this depends on the fact that the method_id_map_ and
  // field_id_map_ are the ultimate source of truth and no id is ever reused to be valid. It also
  // relies on all threads always calling StartDefer if they are going to be allocating jni ids
  // while suspended. If a thread tries to do so while it doesn't have a scope we could miss ids.
  // TODO We should use roles or something to verify that this requirement is not broken.
  //
  // If another thread comes along and adds more methods to the list after copying either (1) the
  // id-maps are already present for the method and everything is fine, (2) the thread is not
  // suspended and so can create the ext-data and id lists or, (3) the thread also suspended
  // everything and incremented the deferred_allocation_refcount_ so it will fix up new ids when
  // it finishes.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::mutator_lock_->AssertSharedHeld(self);
  JniIdDeferStackReflectiveScope jidsrs;
  uintptr_t method_start_id;
  uintptr_t field_start_id;
  {
    ReaderMutexLock mu(self, *Locks::jni_id_lock_);
    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
    jidsrs.Initialize(method_id_map_, field_id_map_);
    method_start_id = deferred_allocation_method_id_start_;
    field_start_id = deferred_allocation_field_id_start_;
  }

  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(method_start_id); index < jidsrs.NumMethods();
       ++index) {
    set_id(jidsrs.GetMethodPtr(index), IndexToId(index));
  }
  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(field_start_id); index < jidsrs.NumFields();
       ++index) {
    set_id(jidsrs.GetFieldPtr(index), IndexToId(index));
  }
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  DCHECK_GE(deferred_allocation_refcount_, 1u);
  if (--deferred_allocation_refcount_ == 0) {
    deferred_allocation_field_id_start_ = 0;
    deferred_allocation_method_id_start_ = 0;
  }
}

ScopedEnableSuspendAllJniIdQueries::ScopedEnableSuspendAllJniIdQueries()
    : manager_(Runtime::Current()->GetJniIdManager()) {
  manager_->StartDefer();
}

ScopedEnableSuspendAllJniIdQueries::~ScopedEnableSuspendAllJniIdQueries() {
  manager_->EndDefer();
}
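
// A minimal usage sketch (hypothetical caller; assumes code that is about to walk the heap with
// all other threads suspended, e.g. instrumentation tooling):
//
//   {
//     ScopedEnableSuspendAllJniIdQueries scope;
//     // jmethodID/jfieldID encoding is now safe even where allocation is forbidden; ids created
//     // here fall back to the linear-scan path until the defer ends.
//   }  // Destructor calls EndDefer, which fixes up the ClassExt id arrays.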

}  // namespace jni
}  // namespace art