/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/array_ref.h"
#include "base/atomic_pair.h"
#include "base/bit_utils.h"
#include "base/locks.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
#include "linear_alloc.h"
#include "object.h"
#include "object_array.h"

namespace art {

namespace linker {
class ImageWriter;
}  // namespace linker

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
union JValue;
class ReflectiveValueVisitor;
class Thread;

namespace mirror {

class CallSite;
class Class;
class ClassLoader;
class DexCache;
class MethodType;
class String;

template <typename T> struct PACKED(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ].
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-0.
  // Any given entry would thus be:
  // {non-0, non-0} OR {0,0}
  //
  // It is then generally sufficient to check whether the
  // lookup index matches the stored index (for a > 0 lookup index),
  // because if it does, the pointer is also non-null.
  //
  // The 0th entry is a special case: its value is either
  // {0,0} (initial state) or {non-0, 0}, which indicates
  // that a valid object is stored at that index for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer since
  // it's always non-null if the id branch succeeds (except for the 0th id).
  // Set the initial state for the 0th entry to {0,1}, which is guaranteed to fail
  // the (lookup id == stored id) branch.
  DexCachePair(ObjPtr<T> object, uint32_t index);
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
};
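
// Illustrative sketch (editorial, not part of the runtime API): how the {0,1}
// sentinel for slot 0 interacts with lookups, assuming a hypothetical cache of
// size 2:
//
//   std::atomic<DexCachePair<String>> cache[2];
//   DexCachePair<String>::Initialize(cache);  // cache == [ {0,1}, {0,0} ]
//
// A lookup of id 0 hits slot 0 and compares 0 against the stored index 1, so it
// misses without ever testing the object pointer. After {obj, 0} is stored in
// slot 0, the same comparison succeeds, and the invariant guarantees the
// pointer is non-null, so the hit path needs no null check.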

template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) { }
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};

template <typename T, size_t size> class NativeDexCachePairArray {
 public:
  NativeDexCachePairArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto pair = GetNativePair(entries_, SlotIndex(index));
    return pair.GetObjectForIndex(index);
  }

  void Set(uint32_t index, T* value) {
    NativeDexCachePair<T> pair(value, index);
    SetNativePair(entries_, SlotIndex(index), pair);
  }

  NativeDexCachePair<T> GetNativePair(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNativePair(entries_, SlotIndex(index));
  }

  void SetNativePair(uint32_t index, NativeDexCachePair<T> value) {
    SetNativePair(entries_, SlotIndex(index), value);
  }

 private:
  NativeDexCachePair<T> GetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array, size_t idx) {
    auto* array = reinterpret_cast<std::atomic<AtomicPair<uintptr_t>>*>(pair_array);
    AtomicPair<uintptr_t> value = AtomicPairLoadAcquire(&array[idx]);
    return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
  }

  void SetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                     size_t idx,
                     NativeDexCachePair<T> pair) {
    auto* array = reinterpret_cast<std::atomic<AtomicPair<uintptr_t>>*>(pair_array);
    AtomicPair<uintptr_t> v(reinterpret_cast<size_t>(pair.object), pair.index);
    AtomicPairStoreRelease(&array[idx], v);
  }

  uint32_t SlotIndex(uint32_t index) {
    return index % size;
  }

  std::atomic<NativeDexCachePair<T>> entries_[0];

  NativeDexCachePairArray(const NativeDexCachePairArray<T, size>&) = delete;
  NativeDexCachePairArray& operator=(const NativeDexCachePairArray<T, size>&) = delete;
};
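
// Editorial note on the class above: each {pointer, index} pair is loaded and
// stored as a single 2*pointer-size atomic with acquire/release ordering, so a
// reader that observes a freshly stored index also observes the matching object
// pointer; the pair can never be seen half-updated.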

template <typename T, size_t size> class DexCachePairArray {
 public:
  DexCachePairArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetPair(index).GetObjectForIndex(index);
  }

  void Set(uint32_t index, T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetPair(index, DexCachePair<T>(value, index));
  }

  DexCachePair<T> GetPair(uint32_t index) {
    return entries_[SlotIndex(index)].load(std::memory_order_relaxed);
  }

  void SetPair(uint32_t index, DexCachePair<T> value) {
    entries_[SlotIndex(index)].store(value, std::memory_order_relaxed);
  }

  void Clear(uint32_t index) {
    uint32_t slot = SlotIndex(index);
    // This is racy but should only be called from the transactional interpreter.
    if (entries_[slot].load(std::memory_order_relaxed).index == index) {
      DexCachePair<T> cleared(nullptr, DexCachePair<T>::InvalidIndexForSlot(slot));
      entries_[slot].store(cleared, std::memory_order_relaxed);
    }
  }

 private:
  uint32_t SlotIndex(uint32_t index) {
    return index % size;
  }

  std::atomic<DexCachePair<T>> entries_[0];

  DexCachePairArray(const DexCachePairArray<T, size>&) = delete;
  DexCachePairArray& operator=(const DexCachePairArray<T, size>&) = delete;
};
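
// Illustrative sketch (editorial; values hypothetical): with size == 1024,
// indices that differ by a multiple of 1024 share a slot, and the stored index
// disambiguates them:
//
//   DexCachePairArray<String, 1024>* strings = ...;  // backing storage elided
//   strings->Set(1025u, str);  // slot 1 (1025 % 1024) now holds {str, 1025}
//   strings->Get(1025u);       // returns str: stored index matches
//   strings->Get(1u);          // returns nullptr: slot 1 stores index 1025, not 1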

template <typename T> class GcRootArray {
 public:
  GcRootArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);

  Atomic<GcRoot<T>>* GetGcRoot(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &entries_[index];
  }

  // Only to be used in locations that don't need the atomic or will later load
  // and read atomically.
  GcRoot<T>* GetGcRootAddress(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(sizeof(GcRoot<T>) == sizeof(Atomic<GcRoot<T>>));
    return reinterpret_cast<GcRoot<T>*>(&entries_[index]);
  }

  void Set(uint32_t index, T* value) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Atomic<GcRoot<T>> entries_[0];
};

template <typename T> class NativeArray {
 public:
  NativeArray() {}

  T* Get(uint32_t index) {
    return entries_[index].load(std::memory_order_relaxed);
  }

  T** GetPtrEntryPtrSize(uint32_t index, PointerSize ptr_size) {
    if (ptr_size == PointerSize::k64) {
      return reinterpret_cast<T**>(reinterpret_cast<uint64_t*>(entries_) + index);
    } else {
      return reinterpret_cast<T**>(reinterpret_cast<uint32_t*>(entries_) + index);
    }
  }

  void Set(uint32_t index, T* value) {
    entries_[index].store(value, std::memory_order_relaxed);
  }

 private:
  Atomic<T*> entries_[0];
};
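
// Editorial note on GetPtrEntryPtrSize above: the entries are reinterpreted at
// the given pointer width, so index 3, for example, resolves to an offset of
// 12 bytes into entries_ under 32-bit pointers and 24 bytes under 64-bit ones,
// presumably so callers such as the image writer can address slots laid out for
// a different target word size.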

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache final : public Object {
 public:
  MIRROR_CLASS("Ljava/lang/DexCache;");

  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Note: update the image version in image.cc if changing any of these cache sizes.

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheMethodCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                "Method dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  // Visit gc-roots in DexCachePair array in [pairs_begin, pairs_end) range.
  template <typename Visitor>
  static void VisitDexCachePairRoots(Visitor& visitor,
                                     DexCachePair<Object>* pairs_begin,
                                     DexCachePair<Object>* pairs_end)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Initialize(const DexFile* dex_file, ObjPtr<ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::dex_lock_);

  // Zero all array references.
  // WARNING: This does not free the memory since it is in LinearAlloc.
  void ResetNativeArrays() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<String> GetLocation() REQUIRES_SHARED(Locks::mutator_lock_);

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear the string for the given string_idx; used to undo string intern
  // transactions so that the string isn't kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear the method type for the given proto_idx; used to undo method type
  // resolution in aborted transactions so that the method type isn't kept live.
  void ClearMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The
  // caller must use the return value in place of |resolved|. This is
  // because multiple threads can invoke the bootstrap method, each
  // producing a call site, but the method handle invocation on the
  // call site must use a single agreed-upon value.
  ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
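
  // A minimal usage sketch (editorial; variable names are illustrative only):
  //
  //   ObjPtr<CallSite> winner = dex_cache->SetResolvedCallSite(idx, candidate);
  //   // Continue with `winner`, not `candidate`: another thread may have
  //   // installed its call site first. WARN_UNUSED enforces using the result.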

  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);

  void SetClassLoader(ObjPtr<ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_);

  ObjPtr<ClassLoader> GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);

  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitNativeRoots(const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Sets to null the dex cache array fields that were allocated with the startup
  // allocator.
  void UnlinkStartupCaches() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether we should allocate a full array given the number of elements.
  // Note: update the image version in image.cc if changing this method.
  static bool ShouldAllocateFullArray(size_t number_of_elements, size_t dex_cache_size) {
    return number_of_elements <= dex_cache_size;
  }
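
  // For example (editorial; values illustrative): a dex file with 600 string ids
  // satisfies ShouldAllocateFullArray(600, kDexCacheStringCacheSize /* 1024 */),
  // so it gets a full, directly indexed 600-entry array; a dex file with 40000
  // string ids instead falls back to the fixed-size hash-pair cache (unless the
  // startup path checked below allocates the full array anyway).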


// NOLINTBEGIN(bugprone-macro-parentheses)
#define DEFINE_ARRAY(name, array_kind, getter_setter, type, ids, alloc_kind) \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  array_kind* Get ##getter_setter() \
      ALWAYS_INLINE \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return GetFieldPtr<array_kind*, kVerifyFlags>(getter_setter ##Offset()); \
  } \
  void Set ##getter_setter(array_kind* value) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    SetFieldPtr<false>(getter_setter ##Offset(), value); \
  } \
  static constexpr MemberOffset getter_setter ##Offset() { \
    return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
  } \
  array_kind* Allocate ##getter_setter(bool startup = false) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return reinterpret_cast<array_kind*>(AllocArray<type>( \
        getter_setter ##Offset(), GetDexFile()->ids(), alloc_kind, startup)); \
  } \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  size_t Num ##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
    return Get ##getter_setter() == nullptr ? 0u : GetDexFile()->ids(); \
  } \

#define DEFINE_PAIR_ARRAY(name, pair_kind, getter_setter, type, size, alloc_kind) \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  pair_kind ##Array<type, size>* Get ##getter_setter() \
      ALWAYS_INLINE \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return GetFieldPtr<pair_kind ##Array<type, size>*, kVerifyFlags>(getter_setter ##Offset()); \
  } \
  void Set ##getter_setter(pair_kind ##Array<type, size>* value) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    SetFieldPtr<false>(getter_setter ##Offset(), value); \
  } \
  static constexpr MemberOffset getter_setter ##Offset() { \
    return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
  } \
  pair_kind ##Array<type, size>* Allocate ##getter_setter() \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return reinterpret_cast<pair_kind ##Array<type, size>*>( \
        AllocArray<std::atomic<pair_kind<type>>>( \
            getter_setter ##Offset(), size, alloc_kind)); \
  } \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  size_t Num ##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
    return Get ##getter_setter() == nullptr ? 0u : size; \
  } \

#define DEFINE_DUAL_CACHE( \
    name, pair_kind, getter_setter, type, pair_size, alloc_pair_kind, \
    array_kind, component_type, ids, alloc_array_kind) \
  DEFINE_PAIR_ARRAY( \
      name, pair_kind, getter_setter, type, pair_size, alloc_pair_kind) \
  DEFINE_ARRAY( \
      name ##array_, array_kind, getter_setter ##Array, component_type, ids, alloc_array_kind) \
  type* Get ##getter_setter ##Entry(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) { \
    DCHECK_LT(index, GetDexFile()->ids()); \
    auto* array = Get ##getter_setter ##Array(); \
    if (array != nullptr) { \
      return array->Get(index); \
    } \
    auto* pairs = Get ##getter_setter(); \
    if (pairs != nullptr) { \
      return pairs->Get(index); \
    } \
    return nullptr; \
  } \
  void Set ##getter_setter ##Entry(uint32_t index, type* resolved) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    DCHECK_LT(index, GetDexFile()->ids()); \
    auto* array = Get ##getter_setter ##Array(); \
    if (array != nullptr) { \
      array->Set(index, resolved); \
    } else { \
      auto* pairs = Get ##getter_setter(); \
      if (pairs == nullptr) { \
        bool should_allocate_full_array = ShouldAllocateFullArray(GetDexFile()->ids(), pair_size); \
        if (ShouldAllocateFullArrayAtStartup() || should_allocate_full_array) { \
          array = Allocate ##getter_setter ##Array(!should_allocate_full_array); \
          array->Set(index, resolved); \
        } else { \
          pairs = Allocate ##getter_setter(); \
          pairs->Set(index, resolved); \
        } \
      } else { \
        pairs->Set(index, resolved); \
      } \
    } \
  } \
  void Unlink ##getter_setter ##ArrayIfStartup() \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    if (!ShouldAllocateFullArray(GetDexFile()->ids(), pair_size)) { \
      Set ##getter_setter ##Array(nullptr); \
    } \
  }
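
// As a reading aid (editorial, sketching roughly what the preprocessor emits):
// the DEFINE_DUAL_CACHE(strings_, ...) invocation below expands to members such
// as GetStrings()/SetStrings()/AllocateStrings()/NumStrings() for the fixed-size
// pair cache, GetStringsArray()/SetStringsArray()/AllocateStringsArray()/
// NumStringsArray() for the full array, plus GetStringsEntry(uint32_t),
// SetStringsEntry(uint32_t, mirror::String*), and UnlinkStringsArrayIfStartup().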

  DEFINE_ARRAY(resolved_call_sites_,
               GcRootArray<CallSite>,
               ResolvedCallSites,
               GcRoot<CallSite>,
               NumCallSiteIds,
               LinearAllocKind::kGCRootArray)

  DEFINE_DUAL_CACHE(resolved_fields_,
                    NativeDexCachePair,
                    ResolvedFields,
                    ArtField,
                    kDexCacheFieldCacheSize,
                    LinearAllocKind::kNoGCRoots,
                    NativeArray<ArtField>,
                    ArtField,
                    NumFieldIds,
                    LinearAllocKind::kNoGCRoots)

  DEFINE_DUAL_CACHE(resolved_method_types_,
                    DexCachePair,
                    ResolvedMethodTypes,
                    mirror::MethodType,
                    kDexCacheMethodTypeCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::MethodType>,
                    GcRoot<mirror::MethodType>,
                    NumProtoIds,
                    LinearAllocKind::kGCRootArray);

  DEFINE_DUAL_CACHE(resolved_methods_,
                    NativeDexCachePair,
                    ResolvedMethods,
                    ArtMethod,
                    kDexCacheMethodCacheSize,
                    LinearAllocKind::kNoGCRoots,
                    NativeArray<ArtMethod>,
                    ArtMethod,
                    NumMethodIds,
                    LinearAllocKind::kNoGCRoots)

  DEFINE_DUAL_CACHE(resolved_types_,
                    DexCachePair,
                    ResolvedTypes,
                    mirror::Class,
                    kDexCacheTypeCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::Class>,
                    GcRoot<mirror::Class>,
                    NumTypeIds,
                    LinearAllocKind::kGCRootArray);

  DEFINE_DUAL_CACHE(strings_,
                    DexCachePair,
                    Strings,
                    mirror::String,
                    kDexCacheStringCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::String>,
                    GcRoot<mirror::String>,
                    NumStringIds,
                    LinearAllocKind::kGCRootArray);

// NOLINTEND(bugprone-macro-parentheses)

 private:
  // Allocate a new array in linear alloc and save it in the given field.
  template<typename T>
  T* AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind, bool startup = false)
     REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Returns whether we should allocate a full array given the current state of
  // the runtime and oat files.
  bool ShouldAllocateFullArrayAtStartup() REQUIRES_SHARED(Locks::mutator_lock_);

  HeapReference<ClassLoader> class_loader_;
  HeapReference<String> location_;

  uint64_t dex_file_;                     // const DexFile*
                                          //
  uint64_t resolved_call_sites_;          // Array of call sites
  uint64_t resolved_fields_;              // NativeDexCacheArray holding ArtField's
  uint64_t resolved_fields_array_;        // Array of ArtField's.
  uint64_t resolved_method_types_;        // DexCacheArray holding mirror::MethodType's
  uint64_t resolved_method_types_array_;  // Array of mirror::MethodType's
  uint64_t resolved_methods_;             // NativeDexCacheArray holding ArtMethod's
  uint64_t resolved_methods_array_;       // Array of ArtMethod's
  uint64_t resolved_types_;               // DexCacheArray holding mirror::Class's
  uint64_t resolved_types_array_;         // Array of resolved types.
  uint64_t strings_;                      // DexCacheArray holding mirror::String's
  uint64_t strings_array_;                // Array of String's.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class linker::ImageWriter;
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_