/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/array_ref.h"
#include "base/atomic_pair.h"
#include "base/bit_utils.h"
#include "base/locks.h"
#include "base/macros.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
#include "linear_alloc.h"
#include "object.h"
#include "object_array.h"

namespace art HIDDEN {

namespace linker {
class ImageWriter;
}  // namespace linker

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
union JValue;
class ReflectiveValueVisitor;
class Thread;

namespace mirror {

class CallSite;
class Class;
class ClassLoader;
class DexCache;
class MethodType;
class String;

template <typename T> struct alignas(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ].
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-0.
  // Any given entry would thus be:
  // {non-0, non-0} OR {0,0}
  //
  // It's generally sufficient then to check if the
  // lookup index matches the stored index (for a >0 lookup index)
  // because if it's true the pointer is also non-null.
  //
  // For the 0th entry, which is a special case, the value is either
  // {0,0} (initial state) or {non-0, 0}, which indicates
  // that a valid object is stored at that index for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer since
  // it's always non-null if the id branch succeeds (except for the 0th id).
  // Set the initial state for the 0th entry to be {0,1}, which is guaranteed to
  // fail the lookup id == stored id branch.
  DexCachePair(ObjPtr<T> object, uint32_t index);
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
};

template <typename T> struct alignas(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
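  //
  // Illustrative sketch (hypothetical values, not part of the runtime API):
  // in a 4-slot cache, dex ids 1 and 5 both map to slot 1 (5 % 4 == 1), so a
  // stored pair answers only the exact id it was populated with:
  //
  //   NativeDexCachePair<ArtMethod> pair(method, /*index=*/ 5u);
  //   pair.GetObjectForIndex(5u);  // returns `method`
  //   pair.GetObjectForIndex(1u);  // returns nullptr (stored index is 5)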
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) {}
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};

template <typename T, size_t size> class NativeDexCachePairArray {
 public:
  NativeDexCachePairArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto pair = GetNativePair(entries_, SlotIndex(index));
    return pair.GetObjectForIndex(index);
  }

  void Set(uint32_t index, T* value) {
    NativeDexCachePair<T> pair(value, index);
    SetNativePair(entries_, SlotIndex(index), pair);
  }

  NativeDexCachePair<T> GetNativePair(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNativePair(entries_, SlotIndex(index));
  }

  void SetNativePair(uint32_t index, NativeDexCachePair<T> value) {
    SetNativePair(entries_, SlotIndex(index), value);
  }

 private:
  NativeDexCachePair<T> GetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array, size_t idx) {
    auto* array = reinterpret_cast<AtomicPair<uintptr_t>*>(pair_array);
    AtomicPair<uintptr_t> value = AtomicPairLoadAcquire(&array[idx]);
    return NativeDexCachePair<T>(reinterpret_cast<T*>(value.val), value.key);
  }

  void SetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                     size_t idx,
                     NativeDexCachePair<T> pair) {
    auto* array = reinterpret_cast<AtomicPair<uintptr_t>*>(pair_array);
    AtomicPair<uintptr_t> v(pair.index, reinterpret_cast<size_t>(pair.object));
    AtomicPairStoreRelease(&array[idx], v);
  }

  uint32_t SlotIndex(uint32_t index) {
    return index % size;
  }

  std::atomic<NativeDexCachePair<T>> entries_[0];

  NativeDexCachePairArray(const NativeDexCachePairArray<T, size>&) = delete;
  NativeDexCachePairArray& operator=(const NativeDexCachePairArray<T, size>&) = delete;
};

template <typename T, size_t size> class DexCachePairArray {
 public:
  DexCachePairArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetPair(index).GetObjectForIndex(index);
  }

  void Set(uint32_t index, T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetPair(index, DexCachePair<T>(value, index));
  }

  DexCachePair<T> GetPair(uint32_t index) {
    return entries_[SlotIndex(index)].load(std::memory_order_acquire);
  }
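
  // Note on ordering (follows from the code here): GetPair's acquire load
  // pairs with the release store in SetPair below, so a reader that observes
  // a populated {object, index} pair also observes all writes made before the
  // corresponding SetPair. A typical lookup is therefore just:
  //
  //   DexCachePair<T> pair = GetPair(idx);
  //   T* obj = pair.GetObjectForIndex(idx);  // non-null only on an exact hit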
  void SetPair(uint32_t index, DexCachePair<T> value) {
    entries_[SlotIndex(index)].store(value, std::memory_order_release);
  }

  void Clear(uint32_t index) {
    uint32_t slot = SlotIndex(index);
    // This is racy but should only be called from the transactional interpreter.
    if (entries_[slot].load(std::memory_order_relaxed).index == index) {
      DexCachePair<T> cleared(nullptr, DexCachePair<T>::InvalidIndexForSlot(slot));
      entries_[slot].store(cleared, std::memory_order_relaxed);
    }
  }

 private:
  uint32_t SlotIndex(uint32_t index) {
    return index % size;
  }

  std::atomic<DexCachePair<T>> entries_[0];

  DexCachePairArray(const DexCachePairArray<T, size>&) = delete;
  DexCachePairArray& operator=(const DexCachePairArray<T, size>&) = delete;
};

template <typename T> class GcRootArray {
 public:
  GcRootArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);

  Atomic<GcRoot<T>>* GetGcRoot(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &entries_[index];
  }

  // Only to be used in locations that don't need the atomic or will later load
  // and read atomically.
  GcRoot<T>* GetGcRootAddress(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(sizeof(GcRoot<T>) == sizeof(Atomic<GcRoot<T>>));
    return reinterpret_cast<GcRoot<T>*>(&entries_[index]);
  }

  void Set(uint32_t index, T* value) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Atomic<GcRoot<T>> entries_[0];
};

template <typename T> class NativeArray {
 public:
  NativeArray() {}

  T* Get(uint32_t index) {
    return entries_[index].load(std::memory_order_relaxed);
  }

  T** GetPtrEntryPtrSize(uint32_t index, PointerSize ptr_size) {
    if (ptr_size == PointerSize::k64) {
      return reinterpret_cast<T**>(reinterpret_cast<uint64_t*>(entries_) + index);
    } else {
      return reinterpret_cast<T**>(reinterpret_cast<uint32_t*>(entries_) + index);
    }
  }

  void Set(uint32_t index, T* value) {
    entries_[index].store(value, std::memory_order_relaxed);
  }

 private:
  Atomic<T*> entries_[0];
};

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache final : public Object {
 public:
  MIRROR_CLASS("Ljava/lang/DexCache;");

  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Note: update the image version in image.cc if changing any of these cache sizes.

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");
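
  // Note: the power-of-2 requirement lets SlotIndex's `index % size` compile
  // down to a mask. For example, with a cache size of 1024:
  //
  //   uint32_t slot = index & 1023u;  // equivalent to index % 1024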

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheMethodCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                "Method dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  // Visit gc-roots in DexCachePair array in [pairs_begin, pairs_end) range.
  template <typename Visitor>
  static void VisitDexCachePairRoots(Visitor& visitor,
                                     DexCachePair<Object>* pairs_begin,
                                     DexCachePair<Object>* pairs_end)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT void Initialize(const DexFile* dex_file, ObjPtr<ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::dex_lock_);

  // Zero all array references.
  // WARNING: This does not free the memory since it is in LinearAlloc.
  EXPORT void ResetNativeArrays() REQUIRES_SHARED(Locks::mutator_lock_);

  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<String> GetLocation(bool allow_location_mismatch = false)
      REQUIRES_SHARED(Locks::mutator_lock_);

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear a string for a string_idx, used to undo string intern transactions to make sure
  // the string isn't kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(dex::ProtoIndex proto_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);
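
  // Illustrative caller-side sketch (ResolveProtoSomehow is a hypothetical
  // helper, not part of this class): consult the cache first, populate on miss.
  //
  //   ObjPtr<MethodType> mt(dex_cache->GetResolvedMethodType(proto_idx));
  //   if (mt == nullptr) {
  //     mt = ResolveProtoSomehow(proto_idx);  // hypothetical resolver
  //     dex_cache->SetResolvedMethodType(proto_idx, mt.Ptr());
  //   }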

  // Clear a method type for proto_idx, used to undo method type resolution
  // in aborted transactions to make sure the method type isn't kept live.
  void ClearMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The
  // caller must use the return value in place of |resolved|. This is
  // because multiple threads can invoke the bootstrap method each
  // producing a call site, but the method handle invocation on the
  // call site must be on a common agreed value.
  ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;

  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  EXPORT void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);

  void SetClassLoader(ObjPtr<ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT ObjPtr<ClassLoader> GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);

  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitNativeRoots(const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Sets to null the dex cache array fields which were allocated with the
  // startup allocator.
  void UnlinkStartupCaches() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether we should allocate a full array given the number of elements.
  // Note: update the image version in image.cc if changing this method.
  static bool ShouldAllocateFullArray(size_t number_of_elements, size_t dex_cache_size) {
    return number_of_elements <= dex_cache_size;
  }

// NOLINTBEGIN(bugprone-macro-parentheses)
#define DEFINE_ARRAY(name, array_kind, getter_setter, type, ids, alloc_kind) \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  array_kind* Get ##getter_setter() \
      ALWAYS_INLINE \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return GetFieldPtr<array_kind*, kVerifyFlags>(getter_setter ##Offset()); \
  } \
  void Set ##getter_setter(array_kind* value) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    SetFieldPtr<false>(getter_setter ##Offset(), value); \
  } \
  static constexpr MemberOffset getter_setter ##Offset() { \
    return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
  } \
  array_kind* Allocate ##getter_setter(bool startup = false) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return reinterpret_cast<array_kind*>(AllocArray<type>( \
        getter_setter ##Offset(), GetDexFile()->ids(), alloc_kind, startup)); \
  } \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  size_t Num ##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
    return Get ##getter_setter() == nullptr ? 0u : GetDexFile()->ids(); \
  }
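
// For illustration, the DEFINE_ARRAY(resolved_call_sites_, GcRootArray<CallSite>,
// ResolvedCallSites, GcRoot<CallSite>, NumCallSiteIds, ...) invocation below
// expands to GetResolvedCallSites(), SetResolvedCallSites(),
// ResolvedCallSitesOffset(), AllocateResolvedCallSites() and
// NumResolvedCallSites().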

#define DEFINE_PAIR_ARRAY(name, pair_kind, getter_setter, type, size, alloc_kind) \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  pair_kind ##Array<type, size>* Get ##getter_setter() \
      ALWAYS_INLINE \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return GetFieldPtr<pair_kind ##Array<type, size>*, kVerifyFlags>(getter_setter ##Offset()); \
  } \
  void Set ##getter_setter(pair_kind ##Array<type, size>* value) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    SetFieldPtr<false>(getter_setter ##Offset(), value); \
  } \
  static constexpr MemberOffset getter_setter ##Offset() { \
    return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
  } \
  pair_kind ##Array<type, size>* Allocate ##getter_setter() \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return reinterpret_cast<pair_kind ##Array<type, size>*>( \
        AllocArray<std::atomic<pair_kind<type>>>( \
            getter_setter ##Offset(), size, alloc_kind)); \
  } \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  size_t Num ##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
    return Get ##getter_setter() == nullptr ? 0u : size; \
  }

#define DEFINE_DUAL_CACHE( \
    name, pair_kind, getter_setter, type, pair_size, alloc_pair_kind, \
    array_kind, component_type, ids, alloc_array_kind) \
  DEFINE_PAIR_ARRAY( \
      name, pair_kind, getter_setter, type, pair_size, alloc_pair_kind) \
  DEFINE_ARRAY( \
      name ##array_, array_kind, getter_setter ##Array, component_type, ids, alloc_array_kind) \
  type* Get ##getter_setter ##Entry(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) { \
    DCHECK_LT(index, GetDexFile()->ids()); \
    auto* array = Get ##getter_setter ##Array(); \
    if (array != nullptr) { \
      return array->Get(index); \
    } \
    auto* pairs = Get ##getter_setter(); \
    if (pairs != nullptr) { \
      return pairs->Get(index); \
    } \
    return nullptr; \
  } \
  void Set ##getter_setter ##Entry(uint32_t index, type* resolved) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    DCHECK_LT(index, GetDexFile()->ids()); \
    auto* array = Get ##getter_setter ##Array(); \
    if (array != nullptr) { \
      array->Set(index, resolved); \
    } else { \
      auto* pairs = Get ##getter_setter(); \
      if (pairs == nullptr) { \
        bool should_allocate_full_array = ShouldAllocateFullArray(GetDexFile()->ids(), pair_size); \
        if (ShouldAllocateFullArrayAtStartup() || should_allocate_full_array) { \
          array = Allocate ##getter_setter ##Array(!should_allocate_full_array); \
          array->Set(index, resolved); \
        } else { \
          pairs = Allocate ##getter_setter(); \
          pairs->Set(index, resolved); \
        } \
      } else { \
        pairs->Set(index, resolved); \
      } \
    } \
  } \
  void Unlink ##getter_setter ##ArrayIfStartup() \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    if (!ShouldAllocateFullArray(GetDexFile()->ids(), pair_size)) { \
      Set ##getter_setter ##Array(nullptr); \
    } \
  }

  DEFINE_ARRAY(resolved_call_sites_,
               GcRootArray<CallSite>,
               ResolvedCallSites,
               GcRoot<CallSite>,
               NumCallSiteIds,
               LinearAllocKind::kGCRootArray)

  DEFINE_DUAL_CACHE(resolved_fields_,
                    NativeDexCachePair,
                    ResolvedFields,
                    ArtField,
                    kDexCacheFieldCacheSize,
                    LinearAllocKind::kNoGCRoots,
                    NativeArray<ArtField>,
                    ArtField*,
                    NumFieldIds,
                    LinearAllocKind::kNoGCRoots)

  DEFINE_DUAL_CACHE(resolved_method_types_,
                    DexCachePair,
                    ResolvedMethodTypes,
                    mirror::MethodType,
                    kDexCacheMethodTypeCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::MethodType>,
                    GcRoot<mirror::MethodType>,
                    NumProtoIds,
                    LinearAllocKind::kGCRootArray);
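
  // Sketch of the dual-cache policy above (derived from Set*Entry and the
  // constants in this file): a dex file whose id count fits the fixed cache
  // size gets a full array, e.g.
  //
  //   ShouldAllocateFullArray(GetDexFile()->NumProtoIds(),
  //                           kDexCacheMethodTypeCacheSize)  // ids <= 1024
  //
  // otherwise a fixed-size pair array serves as a direct-mapped cache, unless
  // ShouldAllocateFullArrayAtStartup() forces a startup-allocated full array.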

  DEFINE_DUAL_CACHE(resolved_methods_,
                    NativeDexCachePair,
                    ResolvedMethods,
                    ArtMethod,
                    kDexCacheMethodCacheSize,
                    LinearAllocKind::kNoGCRoots,
                    NativeArray<ArtMethod>,
                    ArtMethod*,
                    NumMethodIds,
                    LinearAllocKind::kNoGCRoots)

  DEFINE_DUAL_CACHE(resolved_types_,
                    DexCachePair,
                    ResolvedTypes,
                    mirror::Class,
                    kDexCacheTypeCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::Class>,
                    GcRoot<mirror::Class>,
                    NumTypeIds,
                    LinearAllocKind::kGCRootArray);

  DEFINE_DUAL_CACHE(strings_,
                    DexCachePair,
                    Strings,
                    mirror::String,
                    kDexCacheStringCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::String>,
                    GcRoot<mirror::String>,
                    NumStringIds,
                    LinearAllocKind::kGCRootArray);

// NOLINTEND(bugprone-macro-parentheses)

 private:
  // Allocate new array in linear alloc and save it in the given fields.
  template<typename T>
  T* AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind, bool startup = false)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Returns whether we should allocate a full array given the current state of
  // the runtime and oat files.
  bool ShouldAllocateFullArrayAtStartup() REQUIRES_SHARED(Locks::mutator_lock_);

  HeapReference<ClassLoader> class_loader_;
  HeapReference<String> location_;

  uint64_t dex_file_;                     // const DexFile*

  uint64_t resolved_call_sites_;          // Array of call sites.
  uint64_t resolved_fields_;              // NativeDexCachePairArray holding ArtField's.
  uint64_t resolved_fields_array_;        // Array of ArtField's.
  uint64_t resolved_method_types_;        // DexCachePairArray holding mirror::MethodType's.
  uint64_t resolved_method_types_array_;  // Array of mirror::MethodType's.
  uint64_t resolved_methods_;             // NativeDexCachePairArray holding ArtMethod's.
  uint64_t resolved_methods_array_;       // Array of ArtMethod's.
  uint64_t resolved_types_;               // DexCachePairArray holding mirror::Class's.
  uint64_t resolved_types_array_;         // Array of resolved types.
  uint64_t strings_;                      // DexCachePairArray holding mirror::String's.
  uint64_t strings_array_;                // Array of String's.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class linker::ImageWriter;
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_