/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/bit_utils.h"
#include "base/locks.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
#include "object.h"
#include "object_array.h"

namespace art {

namespace linker {
class ImageWriter;
}  // namespace linker

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
union JValue;
class LinearAlloc;
class Thread;

namespace mirror {

class CallSite;
class Class;
class MethodType;
class String;

template <typename T> struct PACKED(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ]
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-0
  // Any given entry would thus be:
  // {non-0, non-0} OR {0,0}
  //
  // It is then generally sufficient to check whether the lookup index
  // matches the stored index (for a > 0 lookup index), because if they
  // match, the pointer is also non-null.
  //
  // For the 0th entry which is a special case, the value is either
  // {0,0} (initial state) or {non-0, 0} which indicates
  // that a valid object is stored at that index for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer since
  // it's always non-null if the id branch succeeds (except for the 0th id).
  // Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
  // the lookup id == stored id branch.
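  //
  // Illustrative lookup sketch (not part of this struct; `cache` and the
  // power-of-two `kSize` are assumed names for the example):
  //
  //   uint32_t slot = idx & (kSize - 1u);  // cheap mask, size is a power of 2
  //   DexCachePair<T> pair = cache[slot].load(std::memory_order_relaxed);
  //   T* obj = pair.GetObjectForIndex(idx);  // nullptr on a cache miss
  //
  // With slot 0 pre-set to {0,1}, a lookup for id 0 fails the index check
  // until a real entry for id 0 has been stored.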
  DexCachePair(ObjPtr<T> object, uint32_t index);
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
};

template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) { }
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};

using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;

using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;

using FieldDexCachePair = NativeDexCachePair<ArtField>;
using FieldDexCacheType = std::atomic<FieldDexCachePair>;

using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;

using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache final : public Object {
 public:
  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheMethodCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                "Method dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");

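  // The power-of-two cache sizes let slot computation compile down to a bit
  // mask, which the compiled entrypoints rely on. Illustrative sketch (the
  // local names are assumed for the example):
  //
  //   uint32_t slot = string_idx.index_ % kDexCacheStringCacheSize;
  //   // Equivalent, because the size is a power of two:
  //   uint32_t masked = string_idx.index_ & (kDexCacheStringCacheSize - 1u);
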
  static constexpr size_t StaticTypeSize() {
    return kDexCacheTypeCacheSize;
  }

  static constexpr size_t StaticStringSize() {
    return kDexCacheStringCacheSize;
  }

  static constexpr size_t StaticArtFieldSize() {
    return kDexCacheFieldCacheSize;
  }

  static constexpr size_t StaticMethodSize() {
    return kDexCacheMethodCacheSize;
  }

  static constexpr size_t StaticMethodTypeSize() {
    return kDexCacheMethodTypeCacheSize;
  }

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  static void InitializeDexCache(Thread* self,
                                 ObjPtr<mirror::DexCache> dex_cache,
                                 ObjPtr<mirror::String> location,
                                 const DexFile* dex_file,
                                 LinearAlloc* linear_alloc,
                                 PointerSize image_pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::dex_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedMethodTypes(MethodTypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedCallSites(GcRoot<mirror::CallSite>* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ObjPtr<String> GetLocation() REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr MemberOffset StringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
  }

  static constexpr MemberOffset PreResolvedStringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, preresolved_strings_);
  }

  static constexpr MemberOffset ResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
  }

  static constexpr MemberOffset ResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
  }

  static constexpr MemberOffset ResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
  }

  static constexpr MemberOffset ResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
  }

  static constexpr MemberOffset ResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
  }

  static constexpr MemberOffset NumStringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
  }

  static constexpr MemberOffset NumPreResolvedStringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_preresolved_strings_);
  }

  static constexpr MemberOffset NumResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
  }

  static constexpr MemberOffset NumResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
  }

  static constexpr MemberOffset NumResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
  }

  static constexpr MemberOffset NumResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
  }

  static constexpr MemberOffset NumResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
  }

  static constexpr size_t PreResolvedStringsAlignment() {
    return alignof(GcRoot<mirror::String>);
  }

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetPreResolvedString(dex::StringIndex string_idx,
                            ObjPtr<mirror::String> resolved)
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear the preresolved string cache to prevent further usage.
  void ClearPreResolvedStrings()
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear a string for a string_idx, used to undo string intern transactions to make sure
  // the string isn't kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
                                       ArtMethod* resolved,
                                       PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The
  // caller must use the return value in place of |resolved|. This is
  // because multiple threads can invoke the bootstrap method, each
  // producing a call site, but all method handle invocations on the
  // call site must use one agreed-upon value.
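  //
  // Illustrative use of that contract (sketch; `dex_cache` and `candidate`
  // are assumed names):
  //
  //   ObjPtr<CallSite> call_site = dex_cache->SetResolvedCallSite(idx, candidate);
  //   // Continue with `call_site`: it may be another thread's winner
  //   // rather than `candidate`.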
  ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<StringDexCacheType*, kVerifyFlags>(StringsOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  GcRoot<mirror::String>* GetPreResolvedStrings() ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<GcRoot<mirror::String>*, kVerifyFlags>(PreResolvedStringsOffset());
  }

  void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(StringsOffset(), strings);
  }

  void SetPreResolvedStrings(GcRoot<mirror::String>* strings)
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(PreResolvedStringsOffset(), strings);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<TypeDexCacheType*, kVerifyFlags>(ResolvedTypesOffset());
  }

  void SetResolvedTypes(TypeDexCacheType* resolved_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
  }

  MethodDexCacheType* GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<MethodDexCacheType*>(ResolvedMethodsOffset());
  }

  void SetResolvedMethods(MethodDexCacheType* resolved_methods)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
  }

  FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
  }

  void SetResolvedFields(FieldDexCacheType* resolved_fields)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  MethodTypeDexCacheType* GetResolvedMethodTypes()
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<MethodTypeDexCacheType*, kVerifyFlags>(ResolvedMethodTypesOffset());
  }

  void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  GcRoot<CallSite>* GetResolvedCallSites()
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<GcRoot<CallSite>*, kVerifyFlags>(ResolvedCallSitesOffset());
  }

  void SetResolvedCallSites(GcRoot<CallSite>* resolved_call_sites)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedCallSitesOffset(), resolved_call_sites);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumStringsOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumPreResolvedStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumPreResolvedStringsOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumResolvedTypesOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumResolvedMethodsOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumResolvedFieldsOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumResolvedMethodTypesOffset());
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  size_t NumResolvedCallSites() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags>(NumResolvedCallSitesOffset());
  }

  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  template <typename T>
  static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                                    size_t idx,
                                                    PointerSize ptr_size);

  template <typename T>
  static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                   size_t idx,
                                   NativeDexCachePair<T> pair,
                                   PointerSize ptr_size);

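  // Illustrative use of the two helpers above when storing a resolved field
  // (sketch; the surrounding names are assumed for the example):
  //
  //   FieldDexCachePair pair(field, field_idx);
  //   SetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), pair, ptr_size);
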
  static size_t PreResolvedStringsSize(size_t num_strings) {
    return sizeof(GcRoot<mirror::String>) * num_strings;
  }

  uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodTypeSlotIndex(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if we succeeded in adding the pre-resolved string array.
  bool AddPreResolvedStringsArray() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void Init(const DexFile* dex_file,
            ObjPtr<String> location,
            StringDexCacheType* strings,
            uint32_t num_strings,
            TypeDexCacheType* resolved_types,
            uint32_t num_resolved_types,
            MethodDexCacheType* resolved_methods,
            uint32_t num_resolved_methods,
            FieldDexCacheType* resolved_fields,
            uint32_t num_resolved_fields,
            MethodTypeDexCacheType* resolved_method_types,
            uint32_t num_resolved_method_types,
            GcRoot<CallSite>* resolved_call_sites,
            uint32_t num_resolved_call_sites)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
  // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
  template <typename IntType>
  struct PACKED(2 * sizeof(IntType)) ConversionPair {
    ConversionPair(IntType f, IntType s) : first(f), second(s) { }
    ConversionPair(const ConversionPair&) = default;
    ConversionPair& operator=(const ConversionPair&) = default;
    IntType first;
    IntType second;
  };
  using ConversionPair32 = ConversionPair<uint32_t>;
  using ConversionPair64 = ConversionPair<uint64_t>;

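  // Illustrative mapping of a NativeDexCachePair<T> onto a ConversionPair64
  // on a 64-bit target (sketch; the real conversion is done out of line):
  //
  //   ConversionPair64 v(reinterpret_cast<uint64_t>(pair.object),
  //                      static_cast<uint64_t>(pair.index));
  //   AtomicStoreRelease16B(target, v);
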
  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Due to lack of 16-byte atomics support, we use hand-crafted routines.
#if defined(__aarch64__) || defined(__mips__)
  // 16-byte atomics are supported on aarch64, mips and mips64.
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    return target->load(std::memory_order_relaxed);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    target->store(value, std::memory_order_release);
  }
#elif defined(__x86_64__)
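  // x86-64 lacks a plain 16-byte atomic load, but LOCK CMPXCHG16B with zero
  // as both the expected and the new value works as one: a non-zero location
  // is simply read into rdx:rax, and a zero location is rewritten with zero.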
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    uint64_t first, second;
    __asm__ __volatile__(
        "lock cmpxchg16b (%2)"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
        : "cc");
    return ConversionPair64(first, second);
  }

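  // The 16-byte store is emulated with a CMPXCHG16B loop: read the current
  // value, then retry the locked compare-exchange until the swap succeeds.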
  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    uint64_t first, second;
    __asm__ __volatile__ (
        "movq (%2), %%rax\n\t"
        "movq 8(%2), %%rdx\n\t"
        "1:\n\t"
        "lock cmpxchg16b (%2)\n\t"
        "jnz 1b"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "b"(value.first), "c"(value.second)
        : "cc");
  }
#else
  static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
  static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
#endif

  HeapReference<String> location_;
  // Number of elements in the preresolved_strings_ array. Note that this appears here because of
  // our packing logic for 32 bit fields.
  uint32_t num_preresolved_strings_;

  uint64_t dex_file_;                // const DexFile*
  uint64_t preresolved_strings_;     // GcRoot<mirror::String>* array with num_preresolved_strings_
                                     // elements.
  uint64_t resolved_call_sites_;     // GcRoot<CallSite>* array with num_resolved_call_sites_
                                     // elements.
  uint64_t resolved_fields_;         // std::atomic<FieldDexCachePair>*, array with
                                     // num_resolved_fields_ elements.
  uint64_t resolved_method_types_;   // std::atomic<MethodTypeDexCachePair>* array with
                                     // num_resolved_method_types_ elements.
  uint64_t resolved_methods_;        // std::atomic<MethodDexCachePair>*, array with
                                     // num_resolved_methods_ elements.
  uint64_t resolved_types_;          // TypeDexCacheType*, array with num_resolved_types_ elements.
  uint64_t strings_;                 // std::atomic<StringDexCachePair>*, array with num_strings_
                                     // elements.

  uint32_t num_resolved_call_sites_;    // Number of elements in the resolved_call_sites_ array.
  uint32_t num_resolved_fields_;        // Number of elements in the resolved_fields_ array.
  uint32_t num_resolved_method_types_;  // Number of elements in the resolved_method_types_ array.
  uint32_t num_resolved_methods_;       // Number of elements in the resolved_methods_ array.
  uint32_t num_resolved_types_;         // Number of elements in the resolved_types_ array.
  uint32_t num_strings_;                // Number of elements in the strings_ array.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class linker::ImageWriter;
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_