/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex_cache-inl.h"

#include "art_method-inl.h"
#include "class_linker.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "linear_alloc.h"
#include "oat_file.h"
#include "object-inl.h"
#include "object.h"
#include "object_array-inl.h"
#include "runtime.h"
#include "runtime_globals.h"
#include "string.h"
#include "thread.h"
#include "utils/dex_cache_arrays_layout-inl.h"

namespace art {
namespace mirror {

void DexCache::InitializeDexCache(Thread* self,
                                  ObjPtr<mirror::DexCache> dex_cache,
                                  ObjPtr<mirror::String> location,
                                  const DexFile* dex_file,
                                  LinearAlloc* linear_alloc,
                                  PointerSize image_pointer_size) {
  DCHECK(dex_file != nullptr);
  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
  DexCacheArraysLayout layout(image_pointer_size, dex_file);
  uint8_t* raw_arrays = nullptr;

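  // A single zero-initialized block backs all of the dex cache arrays; the layout computed
  // above supplies the per-array offsets. 16-byte alignment is requested only when the
  // layout requires it.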
  if (dex_file->NumStringIds() != 0u ||
      dex_file->NumTypeIds() != 0u ||
      dex_file->NumMethodIds() != 0u ||
      dex_file->NumFieldIds() != 0u) {
    static_assert(ArenaAllocator::kAlignment == 8, "Expecting arena alignment of 8.");
    DCHECK(layout.Alignment() == 8u || layout.Alignment() == 16u);
    // Zero-initialized.
    raw_arrays = (layout.Alignment() == 16u)
        ? reinterpret_cast<uint8_t*>(linear_alloc->AllocAlign16(self, layout.Size()))
        : reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
  }

  StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
      reinterpret_cast<StringDexCacheType*>(raw_arrays + layout.StringsOffset());
  TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
      reinterpret_cast<TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
  MethodDexCacheType* methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
      reinterpret_cast<MethodDexCacheType*>(raw_arrays + layout.MethodsOffset());
  FieldDexCacheType* fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
      reinterpret_cast<FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());

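  // Each cache has a fixed maximum capacity; clamp it to the number of ids actually present
  // in the dex file so that small dex files do not waste space.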
  size_t num_strings = kDexCacheStringCacheSize;
  if (dex_file->NumStringIds() < num_strings) {
    num_strings = dex_file->NumStringIds();
  }
  size_t num_types = kDexCacheTypeCacheSize;
  if (dex_file->NumTypeIds() < num_types) {
    num_types = dex_file->NumTypeIds();
  }
  size_t num_fields = kDexCacheFieldCacheSize;
  if (dex_file->NumFieldIds() < num_fields) {
    num_fields = dex_file->NumFieldIds();
  }
  size_t num_methods = kDexCacheMethodCacheSize;
  if (dex_file->NumMethodIds() < num_methods) {
    num_methods = dex_file->NumMethodIds();
  }

  // Note that we allocate the method type dex caches regardless of this flag,
  // and we make sure here that they're not used by the runtime. This is in the
  // interest of simplicity and to avoid extensive compiler and layout class changes.
  //
  // If this needs to be mitigated in a production system running this code,
  // DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
  MethodTypeDexCacheType* method_types = nullptr;
  size_t num_method_types = 0;

  if (dex_file->NumProtoIds() < kDexCacheMethodTypeCacheSize) {
    num_method_types = dex_file->NumProtoIds();
  } else {
    num_method_types = kDexCacheMethodTypeCacheSize;
  }

  if (num_method_types > 0) {
    method_types = reinterpret_cast<MethodTypeDexCacheType*>(
        raw_arrays + layout.MethodTypesOffset());
  }

  GcRoot<mirror::CallSite>* call_sites = (dex_file->NumCallSiteIds() == 0)
      ? nullptr
      : reinterpret_cast<GcRoot<CallSite>*>(raw_arrays + layout.CallSitesOffset());

  DCHECK_ALIGNED(raw_arrays, alignof(StringDexCacheType)) <<
      "Expected raw_arrays to align to StringDexCacheType.";
  DCHECK_ALIGNED(layout.StringsOffset(), alignof(StringDexCacheType)) <<
      "Expected StringsOffset() to align to StringDexCacheType.";
  DCHECK_ALIGNED(strings, alignof(StringDexCacheType)) <<
      "Expected strings to align to StringDexCacheType.";
  static_assert(alignof(StringDexCacheType) == 8u,
                "Expected StringDexCacheType to have align of 8.");
  if (kIsDebugBuild) {
    // Sanity check to make sure all the dex cache arrays are empty. b/28992179
    for (size_t i = 0; i < num_strings; ++i) {
      CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u);
      CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull());
    }
    for (size_t i = 0; i < num_types; ++i) {
      CHECK_EQ(types[i].load(std::memory_order_relaxed).index, 0u);
      CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
    }
    for (size_t i = 0; i < num_methods; ++i) {
      CHECK_EQ(GetNativePairPtrSize(methods, i, image_pointer_size).index, 0u);
      CHECK(GetNativePairPtrSize(methods, i, image_pointer_size).object == nullptr);
    }
    for (size_t i = 0; i < num_fields; ++i) {
      CHECK_EQ(GetNativePairPtrSize(fields, i, image_pointer_size).index, 0u);
      CHECK(GetNativePairPtrSize(fields, i, image_pointer_size).object == nullptr);
    }
    for (size_t i = 0; i < num_method_types; ++i) {
      CHECK_EQ(method_types[i].load(std::memory_order_relaxed).index, 0u);
      CHECK(method_types[i].load(std::memory_order_relaxed).object.IsNull());
    }
    for (size_t i = 0; i < dex_file->NumCallSiteIds(); ++i) {
      CHECK(call_sites[i].IsNull());
    }
  }
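  // Seed slot 0 of each cache with its sentinel entry: a zero-initialized slot would
  // otherwise look like a resolved entry for id 0, so the first slot is marked with an
  // index that can never match it.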
  if (strings != nullptr) {
    mirror::StringDexCachePair::Initialize(strings);
  }
  if (types != nullptr) {
    mirror::TypeDexCachePair::Initialize(types);
  }
  if (fields != nullptr) {
    mirror::FieldDexCachePair::Initialize(fields, image_pointer_size);
  }
  if (methods != nullptr) {
    mirror::MethodDexCachePair::Initialize(methods, image_pointer_size);
  }
  if (method_types != nullptr) {
    mirror::MethodTypeDexCachePair::Initialize(method_types);
  }
  dex_cache->Init(dex_file,
                  location,
                  strings,
                  num_strings,
                  types,
                  num_types,
                  methods,
                  num_methods,
                  fields,
                  num_fields,
                  method_types,
                  num_method_types,
                  call_sites,
                  dex_file->NumCallSiteIds());
}

bool DexCache::AddPreResolvedStringsArray() {
  DCHECK_EQ(NumPreResolvedStrings(), 0u);
  Thread* const self = Thread::Current();
  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
  const size_t num_strings = GetDexFile()->NumStringIds();
  GcRoot<mirror::String>* strings =
      linear_alloc->AllocArray<GcRoot<mirror::String>>(self, num_strings);
  if (strings == nullptr) {
    // Failed to allocate pre-resolved string array (probably due to address fragmentation), bail.
    return false;
  }
  SetField32<false>(NumPreResolvedStringsOffset(), num_strings);

  CHECK(strings != nullptr);
  SetPreResolvedStrings(strings);
  for (size_t i = 0; i < GetDexFile()->NumStringIds(); ++i) {
    CHECK(GetPreResolvedStrings()[i].Read() == nullptr);
  }
  return true;
}

void DexCache::Init(const DexFile* dex_file,
                    ObjPtr<String> location,
                    StringDexCacheType* strings,
                    uint32_t num_strings,
                    TypeDexCacheType* resolved_types,
                    uint32_t num_resolved_types,
                    MethodDexCacheType* resolved_methods,
                    uint32_t num_resolved_methods,
                    FieldDexCacheType* resolved_fields,
                    uint32_t num_resolved_fields,
                    MethodTypeDexCacheType* resolved_method_types,
                    uint32_t num_resolved_method_types,
                    GcRoot<CallSite>* resolved_call_sites,
                    uint32_t num_resolved_call_sites) {
  CHECK(dex_file != nullptr);
  CHECK(location != nullptr);
  CHECK_EQ(num_strings != 0u, strings != nullptr);
  CHECK_EQ(num_resolved_types != 0u, resolved_types != nullptr);
  CHECK_EQ(num_resolved_methods != 0u, resolved_methods != nullptr);
  CHECK_EQ(num_resolved_fields != 0u, resolved_fields != nullptr);
  CHECK_EQ(num_resolved_method_types != 0u, resolved_method_types != nullptr);
  CHECK_EQ(num_resolved_call_sites != 0u, resolved_call_sites != nullptr);

  SetDexFile(dex_file);
  SetLocation(location);
  SetStrings(strings);
  SetResolvedTypes(resolved_types);
  SetResolvedMethods(resolved_methods);
  SetResolvedFields(resolved_fields);
  SetResolvedMethodTypes(resolved_method_types);
  SetResolvedCallSites(resolved_call_sites);
  SetField32<false>(NumStringsOffset(), num_strings);
  SetField32<false>(NumResolvedTypesOffset(), num_resolved_types);
  SetField32<false>(NumResolvedMethodsOffset(), num_resolved_methods);
  SetField32<false>(NumResolvedFieldsOffset(), num_resolved_fields);
  SetField32<false>(NumResolvedMethodTypesOffset(), num_resolved_method_types);
  SetField32<false>(NumResolvedCallSitesOffset(), num_resolved_call_sites);
}

void DexCache::SetLocation(ObjPtr<mirror::String> location) {
  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
}

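// On architectures without native 16-byte atomic loads and stores, the wide dex cache pairs
// are read and written under a process-wide mutex instead.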
#if !defined(__aarch64__) && !defined(__x86_64__) && !defined(__mips__)
static pthread_mutex_t dex_cache_slow_atomic_mutex = PTHREAD_MUTEX_INITIALIZER;

DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target) {
  pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
  DexCache::ConversionPair64 value = *reinterpret_cast<ConversionPair64*>(target);
  pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
  return value;
}

void DexCache::AtomicStoreRelease16B(std::atomic<ConversionPair64>* target,
                                     ConversionPair64 value) {
  pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
  *reinterpret_cast<ConversionPair64*>(target) = value;
  pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
}
#endif

}  // namespace mirror
}  // namespace art