/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/bit_utils.h"
#include "dex_file_types.h"
#include "object.h"
#include "object_array.h"

namespace art {

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
class ImageWriter;
union JValue;
class LinearAlloc;
class Thread;

namespace mirror {

class CallSite;
class Class;
class MethodType;
class String;

template <typename T> struct PACKED(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ].
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-0, so any given entry is either
  // {non-0, non-0} or {0,0}.
  //
  // It is therefore generally sufficient to check whether the lookup index
  // matches the stored index (for a > 0 lookup index), because when it does,
  // the pointer is also non-null.
  //
  // The 0th entry is a special case: its value is either {0,0} (initial
  // state) or {non-0, 0}, which indicates that a valid object is stored at
  // that index for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer,
  // since it is always non-null when the id check succeeds (except for the
  // 0th id). So we set the initial state of the 0th entry to {0,1}, which is
  // guaranteed to fail the lookup id == stored id check.
  DexCachePair(ObjPtr<T> object, uint32_t index)
      : object(object),
        index(index) {}
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
    DexCachePair<T> first_elem;
    first_elem.object = GcRoot<T>(nullptr);
    first_elem.index = InvalidIndexForSlot(0);
    dex_cache[0].store(first_elem, std::memory_order_relaxed);
  }

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(!object.IsNull());
    return object.Read();
  }
};
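
// A minimal, non-normative sketch of how callers typically probe such a
// cache (kDexCacheStringCacheSize and GetStrings() are declared further
// below; the actual lookup is DexCache::GetResolvedString in
// dex_cache-inl.h):
//
//   uint32_t slot = string_idx.index_ % kDexCacheStringCacheSize;
//   StringDexCachePair pair =
//       GetStrings()[slot].load(std::memory_order_relaxed);
//   String* s = pair.GetObjectForIndex(string_idx.index_);  // nullptr on miss.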

template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) { }
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};
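
// Non-normative sketch of a native-pair lookup, assuming a field cache of
// kDexCacheFieldCacheSize entries (the real path is DexCache::GetResolvedField
// together with FieldSlotIndex(), both declared below):
//
//   uint32_t slot = field_idx % kDexCacheFieldCacheSize;
//   FieldDexCachePair pair =
//       GetNativePairPtrSize(GetResolvedFields(), slot, ptr_size);
//   ArtField* field = pair.GetObjectForIndex(field_idx);  // nullptr on miss.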

using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;

using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;

using FieldDexCachePair = NativeDexCachePair<ArtField>;
using FieldDexCacheType = std::atomic<FieldDexCachePair>;

using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;

using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache FINAL : public Object {
 public:
  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheMethodCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                "Method dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");
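
  // The power-of-two requirement lets entrypoints reduce an index to a slot
  // with a single mask instead of a division, e.g. (illustrative):
  //   slot = idx & (kDexCacheStringCacheSize - 1u);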

  static constexpr size_t StaticTypeSize() {
    return kDexCacheTypeCacheSize;
  }

  static constexpr size_t StaticStringSize() {
    return kDexCacheStringCacheSize;
  }

  static constexpr size_t StaticArtFieldSize() {
    return kDexCacheFieldCacheSize;
  }

  static constexpr size_t StaticMethodSize() {
    return kDexCacheMethodCacheSize;
  }

  static constexpr size_t StaticMethodTypeSize() {
    return kDexCacheMethodTypeCacheSize;
  }

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  static void InitializeDexCache(Thread* self,
                                 ObjPtr<mirror::DexCache> dex_cache,
                                 ObjPtr<mirror::String> location,
                                 const DexFile* dex_file,
                                 LinearAlloc* linear_alloc,
                                 PointerSize image_pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::dex_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedMethodTypes(MethodTypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedCallSites(GcRoot<mirror::CallSite>* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
  }

  static MemberOffset StringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
  }

  static MemberOffset ResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
  }

  static MemberOffset ResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
  }

  static MemberOffset ResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
  }

  static MemberOffset ResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
  }

  static MemberOffset ResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
  }

  static MemberOffset NumStringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
  }

  static MemberOffset NumResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
  }

  static MemberOffset NumResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
  }

  static MemberOffset NumResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
  }

  static MemberOffset NumResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
  }

  static MemberOffset NumResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
  }

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear the string cached for string_idx. Used to undo string intern
  // transactions so that the string isn't kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
                                       ArtMethod* resolved,
                                       PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The caller
  // must use the return value in place of |resolved|. This is because
  // multiple threads can invoke the bootstrap method, each producing a call
  // site, but all method handle invocations on the call site must use a
  // single, commonly agreed value.
  CallSite* SetResolvedCallSite(uint32_t call_site_idx, CallSite* resolved) WARN_UNUSED
      REQUIRES_SHARED(Locks::mutator_lock_);
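
  // Illustrative (non-normative) use of the bind-or-adopt contract above;
  // MakeCallSite is a hypothetical helper standing in for bootstrap linkage:
  //
  //   CallSite* call_site = MakeCallSite(...);
  //   call_site = dex_cache->SetResolvedCallSite(idx, call_site);
  //   // Use the returned value: another thread may have won the race.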

  StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
  }

  void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(StringsOffset(), strings);
  }

  TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
  }

  void SetResolvedTypes(TypeDexCacheType* resolved_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
  }

  MethodDexCacheType* GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<MethodDexCacheType*>(ResolvedMethodsOffset());
  }

  void SetResolvedMethods(MethodDexCacheType* resolved_methods)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
  }

  FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
  }

  void SetResolvedFields(FieldDexCacheType* resolved_fields)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
  }

  MethodTypeDexCacheType* GetResolvedMethodTypes()
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
  }

  void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
  }

  GcRoot<CallSite>* GetResolvedCallSites()
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<GcRoot<CallSite>*>(ResolvedCallSitesOffset());
  }

  void SetResolvedCallSites(GcRoot<CallSite>* resolved_call_sites)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedCallSitesOffset(), resolved_call_sites);
  }

  size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumStringsOffset());
  }

  size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedTypesOffset());
  }

  size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedMethodsOffset());
  }

  size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedFieldsOffset());
  }

  size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedMethodTypesOffset());
  }

  size_t NumResolvedCallSites() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedCallSitesOffset());
  }

  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  // NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
  // provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
  // so they need to be public.

  template <typename PtrType>
  static PtrType GetElementPtrSize(PtrType* ptr_array, size_t idx, PointerSize ptr_size);

  template <typename PtrType>
  static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);

  template <typename T>
  static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                                    size_t idx,
                                                    PointerSize ptr_size);

  template <typename T>
  static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                   size_t idx,
                                   NativeDexCachePair<T> pair,
                                   PointerSize ptr_size);
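
  // Descriptive note (not normative): Get/SetNativePairPtrSize convert the
  // {pointer, index} pair through ConversionPair32 on 32-bit targets and
  // ConversionPair64 on 64-bit targets (see ConversionPair below), so the
  // pair is read and written as a single atomic unit.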

  uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void Init(const DexFile* dex_file,
            ObjPtr<String> location,
            StringDexCacheType* strings,
            uint32_t num_strings,
            TypeDexCacheType* resolved_types,
            uint32_t num_resolved_types,
            MethodDexCacheType* resolved_methods,
            uint32_t num_resolved_methods,
            FieldDexCacheType* resolved_fields,
            uint32_t num_resolved_fields,
            MethodTypeDexCacheType* resolved_method_types,
            uint32_t num_resolved_method_types,
            GcRoot<CallSite>* resolved_call_sites,
            uint32_t num_resolved_call_sites)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
  // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
  template <typename IntType>
  struct PACKED(2 * sizeof(IntType)) ConversionPair {
    ConversionPair(IntType f, IntType s) : first(f), second(s) { }
    ConversionPair(const ConversionPair&) = default;
    ConversionPair& operator=(const ConversionPair&) = default;
    IntType first;
    IntType second;
  };
  using ConversionPair32 = ConversionPair<uint32_t>;
  using ConversionPair64 = ConversionPair<uint64_t>;

  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Due to lack of 16-byte atomics support, we use hand-crafted routines.
#if defined(__aarch64__)
  // 16-byte atomics are supported on aarch64.
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    return target->load(std::memory_order_relaxed);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    target->store(value, std::memory_order_release);
  }
#elif defined(__x86_64__)
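  // Descriptive note: x86-64 has no plain 16-byte atomic load/store
  // instruction, but LOCK CMPXCHG16B compares and swaps 16 bytes atomically.
  // The load below "swaps" with an expected value of zero: on mismatch the
  // old value lands in rdx:rax, and on match zero is rewritten unchanged.
  // The store reads the current value, then loops the CAS until it installs
  // 'value'.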
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    uint64_t first, second;
    __asm__ __volatile__(
        "lock cmpxchg16b (%2)"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
        : "cc");
    return ConversionPair64(first, second);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    uint64_t first, second;
    __asm__ __volatile__ (
        "movq (%2), %%rax\n\t"
        "movq 8(%2), %%rdx\n\t"
        "1:\n\t"
        "lock cmpxchg16b (%2)\n\t"
        "jnz 1b"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "b"(value.first), "c"(value.second)
        : "cc");
  }
#else
  static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
  static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
#endif

  HeapReference<String> location_;
  // Number of elements in the resolved_call_sites_ array. Note that this
  // appears here because of our packing logic for 32-bit fields.
  uint32_t num_resolved_call_sites_;

  uint64_t dex_file_;               // const DexFile*
  uint64_t resolved_call_sites_;    // GcRoot<CallSite>* array with num_resolved_call_sites_
                                    // elements.
  uint64_t resolved_fields_;        // std::atomic<FieldDexCachePair>*, array with
                                    // num_resolved_fields_ elements.
  uint64_t resolved_method_types_;  // std::atomic<MethodTypeDexCachePair>* array with
                                    // num_resolved_method_types_ elements.
  uint64_t resolved_methods_;       // std::atomic<MethodDexCachePair>*, array with
                                    // num_resolved_methods_ elements.
  uint64_t resolved_types_;         // TypeDexCacheType*, array with num_resolved_types_ elements.
  uint64_t strings_;                // std::atomic<StringDexCachePair>*, array with num_strings_
                                    // elements.

  uint32_t num_resolved_fields_;        // Number of elements in the resolved_fields_ array.
  uint32_t num_resolved_method_types_;  // Number of elements in the resolved_method_types_ array.
  uint32_t num_resolved_methods_;       // Number of elements in the resolved_methods_ array.
  uint32_t num_resolved_types_;         // Number of elements in the resolved_types_ array.
  uint32_t num_strings_;                // Number of elements in the strings_ array.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_