/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include <android-base/logging.h>

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/pointer_size.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
#include "dex/method_reference.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/allocator/art-dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "oat/oat_file-inl.h"
#include "oat/oat_quick_method_header.h"
#include "object_callbacks.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art HIDDEN {
namespace jit {

static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

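// Key identifying a compiled JNI stub. A stub can be shared by all native methods
// whose ABI-relevant properties match: the same shorty and the same
// static/synchronized/fast-native/critical-native flags.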
class JitCodeCache::JniStubKey {
 public:
  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
      : shorty_(method->GetShorty()),
        is_static_(method->IsStatic()),
        is_fast_native_(method->IsFastNative()),
        is_critical_native_(method->IsCriticalNative()),
        is_synchronized_(method->IsSynchronized()) {
    DCHECK(!(is_fast_native_ && is_critical_native_));
  }

  bool operator<(const JniStubKey& rhs) const {
    if (is_static_ != rhs.is_static_) {
      return rhs.is_static_;
    }
    if (is_synchronized_ != rhs.is_synchronized_) {
      return rhs.is_synchronized_;
    }
    if (is_fast_native_ != rhs.is_fast_native_) {
      return rhs.is_fast_native_;
    }
    if (is_critical_native_ != rhs.is_critical_native_) {
      return rhs.is_critical_native_;
    }
    return strcmp(shorty_, rhs.shorty_) < 0;
  }

  // Update the shorty to point to another method's shorty. Call this function when removing
  // the method that references the old shorty from JniStubData without removing the entire
  // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* shorty = method->GetShorty();
    DCHECK_STREQ(shorty_, shorty);
    shorty_ = shorty;
  }

 private:
  // The shorty points into a DexFile's data and may need to change
  // to point to the same shorty in a different DexFile.
  mutable const char* shorty_;

  const bool is_static_;
  const bool is_fast_native_;
  const bool is_critical_native_;
  const bool is_synchronized_;
};

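// Data tracked for one compiled JNI stub: the stub's code pointer and the list of
// methods currently sharing that stub.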
class JitCodeCache::JniStubData {
 public:
  JniStubData() : code_(nullptr), methods_() {}

  void SetCode(const void* code) {
    DCHECK(code != nullptr);
    code_ = code;
  }

  void UpdateEntryPoints(const void* entrypoint) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsCompiled());
    DCHECK(entrypoint == OatQuickMethodHeader::FromCodePointer(GetCode())->GetEntryPoint());
    instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
    for (ArtMethod* m : GetMethods()) {
      // Because `m` might be in the process of being deleted,
      //   - use the `ArtMethod::StillNeedsClinitCheckMayBeDead()` to check if
      //     we can update the entrypoint, and
      //   - call `Instrumentation::UpdateNativeMethodsCodeToJitCode` instead of the
      //     more generic function `Instrumentation::UpdateMethodsCode()`.
      // The `ArtMethod::StillNeedsClinitCheckMayBeDead()` checks the class status
      // in the to-space object if any even if the method's declaring class points to
      // the from-space class object. This way we do not miss updating an entrypoint
      // even under uncommon circumstances, when during a GC the class becomes visibly
      // initialized, the method becomes hot, we compile the thunk and want to update
      // the entrypoint while the method's declaring class field still points to the
      // from-space class object with the old status.
      if (!m->StillNeedsClinitCheckMayBeDead()) {
        instrum->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
      }
    }
  }

  const void* GetCode() const {
    return code_;
  }

  bool IsCompiled() const {
    return GetCode() != nullptr;
  }

  void AddMethod(ArtMethod* method) {
    if (!ContainsElement(methods_, method)) {
      methods_.push_back(method);
    }
  }

  const std::vector<ArtMethod*>& GetMethods() const {
    return methods_;
  }

  void RemoveMethodsIn(const LinearAlloc& alloc) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto kept_end = std::partition(
        methods_.begin(),
        methods_.end(),
        [&alloc](ArtMethod* method) { return !alloc.ContainsUnsafe(method); });
    for (auto it = kept_end; it != methods_.end(); it++) {
      VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
    }
    methods_.erase(kept_end, methods_.end());
  }

  bool RemoveMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto it = std::find(methods_.begin(), methods_.end(), method);
    if (it != methods_.end()) {
      VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
      methods_.erase(it);
      return true;
    } else {
      return false;
    }
  }

  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
  }

 private:
  const void* code_;
  std::vector<ArtMethod*> methods_;
};

JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
                                   bool rwx_memory_allowed,
                                   bool is_zygote,
                                   std::string* error_msg) {
  // Register for membarrier expedited sync core if JIT will be generating code.
  if (!used_only_for_profile_data) {
    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
      // flushed and it's used when adding code to the JIT. The memory used by the new code may
      // have just been released and, in theory, the old code could still be in a pipeline.
      VLOG(jit) << "Kernel does not support membarrier sync-core";
    }
  }

  Runtime* runtime = Runtime::Current();
  size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
  // Check whether the provided max capacity in options is below 1GB.
  size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  JitMemoryRegion region;
  if (!region.Initialize(initial_capacity,
                         max_capacity,
                         rwx_memory_allowed,
                         is_zygote,
                         error_msg)) {
    return nullptr;
  }

  if (region.HasCodeMapping()) {
    const MemMap* exec_pages = region.GetExecPages();
    runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
  }

  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
  if (is_zygote) {
    // Zygote should never collect code to share the memory with the children.
    jit_code_cache->garbage_collect_code_ = false;
    jit_code_cache->shared_region_ = std::move(region);
  } else {
    jit_code_cache->private_region_ = std::move(region);
  }

  VLOG(jit) << "Created jit code cache: initial capacity="
            << PrettySize(initial_capacity)
            << ", maximum capacity="
            << PrettySize(max_capacity);

  return jit_code_cache.release();
}

JitCodeCache::JitCodeCache()
    : is_weak_access_enabled_(true),
      inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
      reserved_capacity_(GetInitialCapacity() * kReservedCapacityMultiplier),
      zygote_map_(&shared_region_),
      lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
      collection_in_progress_(false),
      garbage_collect_code_(true),
      number_of_baseline_compilations_(0),
      number_of_optimized_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
}

JitCodeCache::~JitCodeCache() {
  if (private_region_.HasCodeMapping()) {
    const MemMap* exec_pages = private_region_.GetExecPages();
    Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
  }
  if (shared_region_.HasCodeMapping()) {
    const MemMap* exec_pages = shared_region_.GetExecPages();
    Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
  }
}

bool JitCodeCache::PrivateRegionContainsPc(const void* ptr) const {
  return private_region_.IsInExecSpace(ptr);
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() &&
        it->second.IsCompiled() &&
        ContainsElement(it->second.GetMethods(), method)) {
      return true;
    }
  } else {
    for (const auto& it : method_code_map_) {
      if (it.second == method) {
        return true;
      }
    }
    if (zygote_map_.ContainsMethod(method)) {
      return true;
    }
  }
  return false;
}

const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
  DCHECK(method->IsNative());
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  auto it = jni_stubs_map_.find(JniStubKey(method));
  if (it != jni_stubs_map_.end()) {
    JniStubData& data = it->second;
    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
      return data.GetCode();
    }
  }
  return nullptr;
}

const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  if (method->IsPreCompiled()) {
    const void* code_ptr = nullptr;
    if (method->GetDeclaringClass<kWithoutReadBarrier>()->IsBootStrapClassLoaded()) {
      code_ptr = zygote_map_.GetCodeFor(method);
    } else {
      MutexLock mu(self, *Locks::jit_lock_);
      auto it = saved_compiled_methods_map_.find(method);
      if (it != saved_compiled_methods_map_.end()) {
        code_ptr = it->second;
        // Now that we're using the saved entrypoint, remove it from the saved map.
        saved_compiled_methods_map_.erase(it);
      }
    }
    if (code_ptr != nullptr) {
      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      return method_header->GetEntryPoint();
    }
  }
  return nullptr;
}

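// Waits for any code cache collection currently in progress to finish.
// Returns true if there was a collection to wait for, false otherwise.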
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

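// Each JIT allocation places an OatQuickMethodHeader (rounded up to the ISA's code
// alignment) immediately before the code it describes. These helpers convert between
// a code pointer and the start of its underlying allocation.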
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

static const void* FromAllocationToCode(const uint8_t* alloc) {
  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
  return reinterpret_cast<const void*>(alloc + RoundUp(sizeof(OatQuickMethodHeader), alignment));
}

static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  // The length of the table is stored just before the stack map (and therefore at the end of
  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}

static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
                                bool is_shared_region)
    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!kIsDebugBuild) {
    return;
  }
  // Put all roots in `roots_data`.
  for (Handle<mirror::Object> object : roots) {
    // Ensure the string is strongly interned. b/32995596
    if (object->IsString()) {
      ObjPtr<mirror::String> str = object->AsString();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
    }
    // Ensure that we don't put movable objects in the shared region.
    if (is_shared_region) {
      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
    }
  }
}

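// The GC root table sits right before the optimized CodeInfo, with the root count
// (see GetNumberOfRoots above) wedged between them, so the table's start is found by
// stepping back ComputeRootTableSize(roots) bytes from the CodeInfo pointer.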
static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
  uint32_t roots = GetNumberOfRoots(data);
  if (number_of_roots != nullptr) {
    *number_of_roots = roots;
  }
  return data - ComputeRootTableSize(roots);
}

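// Called by the GC to sweep weak references held by JIT-compiled code: the GC roots
// embedded in each method's root table, and the classes recorded in inline caches.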
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  for (const auto& entry : method_code_map_) {
    uint32_t number_of_roots = 0;
    const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
    uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
        ? private_region_.GetWritableDataAddress(root_table)
        : shared_region_.GetWritableDataAddress(root_table);
    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
    for (uint32_t i = 0; i < number_of_roots; ++i) {
      // This does not need a read barrier because this is called by GC.
      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
      if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
        // Entry got deleted in a previous sweep.
      } else if (object->IsString<kDefaultVerifyFlags>()) {
        mirror::Object* new_object = visitor->IsMarked(object);
        // We know the string is marked because it's a strongly-interned string that
        // is always alive.
        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
        // out of the weak access/creation pause. b/32167580
        DCHECK_NE(new_object, nullptr) << "old-string:" << object;
        if (new_object != object) {
          roots[i] = GcRoot<mirror::Object>(new_object);
        }
      } else {
        mirror::Object* new_klass = visitor->IsMarked(object);
        if (new_klass == nullptr) {
          roots[i] = GcRoot<mirror::Object>(Runtime::GetWeakClassSentinel());
        } else if (new_klass != object) {
          roots[i] = GcRoot<mirror::Object>(new_klass);
        }
      }
    }
  }
  // Walk over inline caches to clear entries containing unloaded classes.
  for (const auto& [_, info] : profiling_infos_) {
    InlineCache* caches = info->GetInlineCaches();
    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      InlineCache* cache = &caches[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        mirror::Class* klass = cache->classes_[j].Read<kWithoutReadBarrier>();
        if (klass != nullptr) {
          mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
          if (new_klass != klass) {
            cache->classes_[j] = GcRoot<mirror::Class>(new_klass);
          }
        }
      }
    }
  }
}

void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
  if (IsInZygoteExecSpace(code_ptr)) {
    // No need to free, this is shared memory.
    return;
  }
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const uint8_t* data = nullptr;
  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
    data = GetRootTable(code_ptr);
  }  // else this is a JNI stub without any data.

  FreeLocked(&private_region_, reinterpret_cast<uint8_t*>(allocation), data);
}

void JitCodeCache::FreeAllMethodHeaders(
    const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
  // We need to remove entries in method_headers from CHA dependencies
  // first since once we do FreeCode() below, the memory can be reused
  // so it's possible for the same method_header to start representing
  // different compiled code.
  {
    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
        ->RemoveDependentsWithMethodHeaders(method_headers);
  }

  {
    ScopedCodeCacheWrite scc(private_region_);
    for (const OatQuickMethodHeader* method_header : method_headers) {
      FreeCodeAndData(method_header->GetCode());
    }

    // We have potentially removed a lot of debug info. Do maintenance pass to save space.
    RepackNativeDebugInfoForJit();
  }

  // Check that the set of compiled methods exactly matches native debug information.
  // Does not check zygote methods since they can change concurrently.
  if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
    std::map<const void*, ArtMethod*> compiled_methods;
    std::set<const void*> debug_info;
    VisitAllMethods([&](const void* addr, ArtMethod* method) {
      if (!IsInZygoteExecSpace(addr)) {
        CHECK(addr != nullptr && method != nullptr);
        compiled_methods.emplace(addr, method);
      }
    });
    ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
      addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
      bool res = debug_info.emplace(addr).second;
      CHECK(res) << "Duplicate debug info: " << addr << " " << name;
      CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
    });
    if (!debug_info.empty()) {  // If debug-info generation is enabled.
      for (const auto& [addr, method] : compiled_methods) {
        CHECK_EQ(debug_info.count(addr), 1u) << "Missing debug info";
      }
      CHECK_EQ(compiled_methods.size(), debug_info.size());
    }
  }
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  ScopedDebugDisallowReadBarriers sddrb(self);
  // We use a set to first collect all method_headers whose code need to be
  // removed. We need to free the underlying code after we remove CHA dependencies
  // for entries in this set. And it's more efficient to iterate through
  // the CHA dependency map just once with an unordered_set.
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  MutexLock mu(self, *Locks::jit_lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
      it->second.RemoveMethodsIn(alloc);
      if (it->second.GetMethods().empty()) {
        method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
        it = jni_stubs_map_.erase(it);
      } else {
        it->first.UpdateShorty(it->second.GetMethods().front());
        ++it;
      }
    }
    for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
      if (alloc.ContainsUnsafe(*it)) {
        it = zombie_jni_code_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
      if (alloc.ContainsUnsafe(*it)) {
        it = processed_zombie_jni_code_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
        VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
        zombie_code_.erase(it->first);
        processed_zombie_code_.erase(it->first);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
    DCHECK(!ContainsElement(zombie_code_, it->second));
    if (alloc.ContainsUnsafe(it->first)) {
      // Note that the code has already been pushed to method_headers in the loop
      // above and is going to be removed in FreeCode() below.
      it = osr_code_map_.erase(it);
    } else {
      ++it;
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = it->second;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
  FreeAllMethodHeaders(method_headers);
}

bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
  return gUseReadBarrier
      ? self->GetWeakRefAccessEnabled()
      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
}

void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
  if (IsWeakAccessEnabled(self)) {
    return;
  }
  ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
  MutexLock mu(self, *Locks::jit_lock_);
  while (!IsWeakAccessEnabled(self)) {
    inline_cache_cond_.Wait(self);
  }
}

void JitCodeCache::BroadcastForInlineCacheAccess() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::jit_lock_);
  inline_cache_cond_.Broadcast(self);
}

void JitCodeCache::AllowInlineCacheAccess() {
  DCHECK(!gUseReadBarrier);
  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
  BroadcastForInlineCacheAccess();
}

void JitCodeCache::DisallowInlineCacheAccess() {
  DCHECK(!gUseReadBarrier);
  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}

void JitCodeCache::CopyInlineCacheInto(
    const InlineCache& ic,
    /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
  static_assert(arraysize(ic.classes_) == InlineCache::kIndividualCacheSize);
  DCHECK_EQ(classes->Capacity(), InlineCache::kIndividualCacheSize);
  DCHECK_EQ(classes->Size(), 0u);
  WaitUntilInlineCacheAccessible(Thread::Current());
  // Note that we don't need to lock `lock_` here, the compiler calling
  // this method has already ensured the inline cache will not be deleted.
  for (const GcRoot<mirror::Class>& root : ic.classes_) {
    mirror::Class* object = root.Read();
    if (object != nullptr) {
      DCHECK_LT(classes->Size(), classes->Capacity());
      classes->NewHandle(object);
    }
  }
}

bool JitCodeCache::Commit(Thread* self,
                          JitMemoryRegion* region,
                          ArtMethod* method,
                          ArrayRef<const uint8_t> reserved_code,
                          ArrayRef<const uint8_t> code,
                          ArrayRef<const uint8_t> reserved_data,
                          const std::vector<Handle<mirror::Object>>& roots,
                          ArrayRef<const uint8_t> stack_map,
                          const std::vector<uint8_t>& debug_info,
                          bool is_full_debug_info,
                          CompilationKind compilation_kind,
                          const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  DCHECK_IMPLIES(method->IsNative(), (compilation_kind != CompilationKind::kOsr));

  if (!method->IsNative()) {
    // We need to do this before grabbing the lock_ because it needs to be able to see the string
    // InternTable. Native methods do not have roots.
    DCheckRootsAreValid(roots, IsSharedRegion(*region));
  }

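  // The reserved data region is laid out as [root table][stack maps]; compute
  // pointers to both parts before committing.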
  const uint8_t* roots_data = reserved_data.data();
  size_t root_table_size = ComputeRootTableSize(roots.size());
  const uint8_t* stack_map_data = roots_data + root_table_size;

  OatQuickMethodHeader* method_header = nullptr;
  {
    MutexLock mu(self, *Locks::jit_lock_);
    const uint8_t* code_ptr = region->CommitCode(reserved_code, code, stack_map_data);
    if (code_ptr == nullptr) {
      return false;
    }
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);

    // Commit roots and stack maps before updating the entry point.
    if (!region->CommitData(reserved_data, roots, stack_map)) {
      return false;
    }

    switch (compilation_kind) {
      case CompilationKind::kOsr:
        number_of_osr_compilations_++;
        break;
      case CompilationKind::kBaseline:
        number_of_baseline_compilations_++;
        break;
      case CompilationKind::kOptimized:
        number_of_optimized_compilations_++;
        break;
    }

    // We need to update the debug info before the entry point gets set.
    // At the same time we want to do under JIT lock so that debug info and JIT maps are in sync.
    if (!debug_info.empty()) {
      // NB: Don't allow packing of full info since it would remove non-backtrace data.
      AddNativeDebugInfoForJit(code_ptr, debug_info, /*allow_packing=*/ !is_full_debug_info);
    }

    // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
    // compiled code is considered invalidated by some class linking, but below we still make the
    // compiled code valid for the method.  Need cha_lock_ for checking all single-implementation
    // flags and register dependencies.
    {
      ScopedDebugDisallowReadBarriers sddrb(self);
      MutexLock cha_mu(self, *Locks::cha_lock_);
      bool single_impl_still_valid = true;
      for (ArtMethod* single_impl : cha_single_implementation_list) {
        if (!single_impl->HasSingleImplementation()) {
          // Simply discard the compiled code.
          // Hopefully the class hierarchy will be more stable when compilation is retried.
          single_impl_still_valid = false;
          break;
        }
      }

      // Discard the code if any single-implementation assumptions are now invalid.
      if (UNLIKELY(!single_impl_still_valid)) {
        VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
        return false;
      }
      DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
          << "Should not be using cha on debuggable apps/runs!";

      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      for (ArtMethod* single_impl : cha_single_implementation_list) {
        class_linker->GetClassHierarchyAnalysis()->AddDependency(
            single_impl, method, method_header);
      }
    }

    if (UNLIKELY(method->IsNative())) {
      ScopedDebugDisallowReadBarriers sddrb(self);
      auto it = jni_stubs_map_.find(JniStubKey(method));
      DCHECK(it != jni_stubs_map_.end())
          << "Entry inserted in NotifyCompilationOf() should be alive.";
      JniStubData* data = &it->second;
      DCHECK(ContainsElement(data->GetMethods(), method))
          << "Entry inserted in NotifyCompilationOf() should contain this method.";
      data->SetCode(code_ptr);
      data->UpdateEntryPoints(method_header->GetEntryPoint());
    } else {
      if (method->IsPreCompiled() && IsSharedRegion(*region)) {
        ScopedDebugDisallowReadBarriers sddrb(self);
        zygote_map_.Put(code_ptr, method);
      } else {
        ScopedDebugDisallowReadBarriers sddrb(self);
        method_code_map_.Put(code_ptr, method);
      }
      if (compilation_kind == CompilationKind::kOsr) {
        ScopedDebugDisallowReadBarriers sddrb(self);
        osr_code_map_.Put(method, code_ptr);
      } else if (method->StillNeedsClinitCheck()) {
        ScopedDebugDisallowReadBarriers sddrb(self);
        // This situation currently only occurs in the jit-zygote mode.
        DCHECK(!garbage_collect_code_);
        DCHECK(method->IsPreCompiled());
        // The shared region can easily be queried. For the private region, we
        // use a side map.
        if (!IsSharedRegion(*region)) {
          saved_compiled_methods_map_.Put(method, code_ptr);
        }
      } else {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
            method, method_header->GetEntryPoint());
      }
    }
    VLOG(jit)
        << "JIT added (kind=" << compilation_kind << ") "
        << ArtMethod::PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                         method_header->GetCodeSize());
  }

  if (kIsDebugBuild) {
    uintptr_t entry_point = reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
    DCHECK_EQ(LookupMethodHeader(entry_point, method), method_header) << method->PrettyMethod();
    DCHECK_EQ(LookupMethodHeader(entry_point + method_header->GetCodeSize() - 1, method),
              method_header) << method->PrettyMethod();
  }
  return true;
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  return CodeCacheSizeLocked();
}

bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
  // This function is used only for testing and only with non-native methods.
  CHECK(!method->IsNative());

  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);

  bool osr = osr_code_map_.find(method) != osr_code_map_.end();
  bool in_cache = RemoveMethodLocked(method, release_memory);

  if (!in_cache) {
    return false;
  }

  Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
  VLOG(jit)
      << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
      << ArtMethod::PrettyMethod(method) << "@" << method
      << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
      << " dcache_size=" << PrettySize(DataCacheSizeLocked());
  return true;
}

bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
  if (LIKELY(!method->IsNative())) {
    auto it = profiling_infos_.find(method);
    if (it != profiling_infos_.end()) {
      profiling_infos_.erase(it);
    }
  }

  bool in_cache = false;
  ScopedCodeCacheWrite ccw(private_region_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
      in_cache = true;
      if (it->second.GetMethods().empty()) {
        if (release_memory) {
          FreeCodeAndData(it->second.GetCode());
        }
        jni_stubs_map_.erase(it);
        zombie_jni_code_.erase(method);
      } else {
        it->first.UpdateShorty(it->second.GetMethods().front());
      }
    }
  } else {
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (it->second == method) {
        in_cache = true;
        if (release_memory) {
          FreeCodeAndData(it->first);
        }
        VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }

    auto osr_it = osr_code_map_.find(method);
    if (osr_it != osr_code_map_.end()) {
      osr_code_map_.erase(osr_it);
    }
  }

  return in_cache;
}

// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be on any thread's call
// stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  RemoveMethodLocked(method, /* release_memory= */ true);
}

// This invalidates old_method. Once this function returns one can no longer use old_method to
// execute code unless it is fixed up. This fixup will happen later in the process of installing a
// class redefinition.
// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKs that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  if (old_method->IsNative()) {
    // Update methods in jni_stubs_map_.
    for (auto& entry : jni_stubs_map_) {
      JniStubData& data = entry.second;
      data.MoveObsoleteMethod(old_method, new_method);
    }
    return;
  }
  // Update method_code_map_ to point to the new method.
  for (auto& it : method_code_map_) {
    if (it.second == old_method) {
      it.second = new_method;
    }
  }
  // Update osr_code_map_ to point to the new method.
  auto code_map = osr_code_map_.find(old_method);
  if (code_map != osr_code_map_.end()) {
    osr_code_map_.Put(new_method, code_map->second);
    osr_code_map_.erase(old_method);
  }
}

void JitCodeCache::TransitionToDebuggable() {
  // Check that none of our methods have an entrypoint in the zygote exec
  // space (this should be taken care of by
  // ClassLinker::UpdateEntryPointsClassVisitor).
  Thread* self = Thread::Current();
  ScopedDebugDisallowReadBarriers sddrb(self);
  {
    MutexLock mu(self, *Locks::jit_lock_);
    if (kIsDebugBuild) {
      // TODO: Check `jni_stubs_map_`?
      for (const auto& entry : method_code_map_) {
        ArtMethod* method = entry.second;
        DCHECK(!method->IsPreCompiled());
        DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
      }
    }
    // Not strictly necessary, but this map is useless now.
    saved_compiled_methods_map_.clear();
  }
  if (kIsDebugBuild) {
    for (const auto& entry : zygote_map_) {
      ArtMethod* method = entry.method;
      if (method != nullptr) {
        DCHECK(!method->IsPreCompiled());
        DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
      }
    }
  }
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  return GetCurrentRegion()->GetUsedMemoryForCode();
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  return GetCurrentRegion()->GetUsedMemoryForData();
}

bool JitCodeCache::Reserve(Thread* self,
                           JitMemoryRegion* region,
                           size_t code_size,
                           size_t stack_map_size,
                           size_t number_of_roots,
                           ArtMethod* method,
                           /*out*/ArrayRef<const uint8_t>* reserved_code,
                           /*out*/ArrayRef<const uint8_t>* reserved_data) {
  code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
  size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));

  const uint8_t* code;
  const uint8_t* data;
  while (true) {
    bool at_max_capacity = false;
    {
      ScopedThreadSuspension sts(self, ThreadState::kSuspended);
      MutexLock mu(self, *Locks::jit_lock_);
      ScopedCodeCacheWrite ccw(*region);
      code = region->AllocateCode(code_size);
      data = region->AllocateData(data_size);
      at_max_capacity = IsAtMaxCapacity();
    }
    if (code != nullptr && data != nullptr) {
      break;
    }
    Free(self, region, code, data);
    if (at_max_capacity) {
      VLOG(jit) << "JIT failed to allocate code of size "
                << PrettySize(code_size)
                << ", and data of size "
                << PrettySize(data_size);
      return false;
    }
    // Increase the capacity and try again.
    IncreaseCodeCacheCapacity(self);
  }

  *reserved_code = ArrayRef<const uint8_t>(code, code_size);
  *reserved_data = ArrayRef<const uint8_t>(data, data_size);

  MutexLock mu(self, *Locks::jit_lock_);
  histogram_code_memory_use_.AddValue(code_size);
  if (code_size > kCodeSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(code_size)
              << " for compiled code of "
              << ArtMethod::PrettyMethod(method);
  }
  histogram_stack_map_memory_use_.AddValue(data_size);
  if (data_size > kStackMapSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(data_size)
              << " for stack maps of "
              << ArtMethod::PrettyMethod(method);
  }
  return true;
}

void JitCodeCache::Free(Thread* self,
                        JitMemoryRegion* region,
                        const uint8_t* code,
                        const uint8_t* data) {
  MutexLock mu(self, *Locks::jit_lock_);
  ScopedCodeCacheWrite ccw(*region);
  FreeLocked(region, code, data);
}

void JitCodeCache::FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data) {
  if (code != nullptr) {
    RemoveNativeDebugInfoForJit(reinterpret_cast<const void*>(FromAllocationToCode(code)));
    region->FreeCode(code);
  }
  if (data != nullptr) {
    region->FreeData(data);
  }
}

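// Checkpoint closure run on each thread: walks the thread's stack and marks, in the
// live bitmap, any JIT-compiled code the thread is currently executing, so a
// concurrent collection does not free it.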
class MarkCodeClosure final : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
      : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}

  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    ScopedTrace trace(__PRETTY_FUNCTION__);
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) {
          const OatQuickMethodHeader* method_header =
              stack_visitor->GetCurrentOatQuickMethodHeader();
          if (method_header == nullptr) {
            return true;
          }
          const void* code = method_header->GetCode();
          if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
            // Use the atomic set version, as multiple threads are executing this code.
            bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
          }
          return true;
        },
        thread,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kSkipInlinedFrames);

    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
  Barrier* const barrier_;
};

void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
  Barrier barrier(0);
  size_t threads_running_checkpoint = 0;
  MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  // Now that we have run our checkpoint, move to a suspended state and wait
  // for other threads to run the checkpoint.
  ScopedThreadSuspension sts(self, ThreadState::kSuspended);
  if (threads_running_checkpoint != 0) {
    barrier.Increment(self, threads_running_checkpoint);
  }
}

bool JitCodeCache::IsAtMaxCapacity() const {
  return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
}

void JitCodeCache::IncreaseCodeCacheCapacity(Thread* self) {
  ScopedThreadSuspension sts(self, ThreadState::kSuspended);
  MutexLock mu(self, *Locks::jit_lock_);
  // Wait for a potential collection, as the size of the bitmap used by that collection
  // is of the current capacity.
  WaitForPotentialCollectionToComplete(self);
  private_region_.IncreaseCodeCacheCapacity();
}

void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  // Iterate over all zombie code and remove entries that are not marked.
  for (auto it = processed_zombie_code_.begin(); it != processed_zombie_code_.end();) {
    const void* code_ptr = *it;
    uintptr_t allocation = FromCodeToAllocation(code_ptr);
    DCHECK(!IsInZygoteExecSpace(code_ptr));
    if (GetLiveBitmap()->Test(allocation)) {
      ++it;
    } else {
      OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      method_headers.insert(header);
      method_code_map_.erase(header->GetCode());
      VLOG(jit) << "JIT removed " << *it;
      it = processed_zombie_code_.erase(it);
    }
  }
  for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
    ArtMethod* method = *it;
    auto stub = jni_stubs_map_.find(JniStubKey(method));
    if (stub == jni_stubs_map_.end()) {
      it = processed_zombie_jni_code_.erase(it);
      continue;
    }
    JniStubData& data = stub->second;
    if (!data.IsCompiled() || !ContainsElement(data.GetMethods(), method)) {
      it = processed_zombie_jni_code_.erase(it);
    } else if (method->GetEntryPointFromQuickCompiledCode() ==
            OatQuickMethodHeader::FromCodePointer(data.GetCode())->GetEntryPoint()) {
      // The stub got reused for this method; remove ourselves from the zombie
      // list.
      it = processed_zombie_jni_code_.erase(it);
    } else if (!GetLiveBitmap()->Test(FromCodeToAllocation(data.GetCode()))) {
      data.RemoveMethod(method);
      if (data.GetMethods().empty()) {
        OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
        method_headers.insert(header);
        CHECK(ContainsPc(header));
        VLOG(jit) << "JIT removed native code of " << method->PrettyMethod();
        jni_stubs_map_.erase(stub);
      } else {
        stub->first.UpdateShorty(stub->second.GetMethods().front());
      }
      it = processed_zombie_jni_code_.erase(it);
    } else {
      ++it;
    }
  }
  FreeAllMethodHeaders(method_headers);
}

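// Records code that is no longer a method's active entry point as a zombie. Zombie
// code stays around until a collection proves no thread is still executing it.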
void JitCodeCache::AddZombieCode(ArtMethod* method, const void* entry_point) {
  CHECK(ContainsPc(entry_point));
  CHECK(method->IsNative() || (method->GetEntryPointFromQuickCompiledCode() != entry_point));
  const void* code_ptr = OatQuickMethodHeader::FromEntryPoint(entry_point)->GetCode();
  if (!IsInZygoteExecSpace(code_ptr)) {
    Thread* self = Thread::Current();
    if (Locks::jit_lock_->IsExclusiveHeld(self)) {
      AddZombieCodeInternal(method, code_ptr);
    } else {
      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
      AddZombieCodeInternal(method, code_ptr);
    }
  }
}


class JitGcTask final : public Task {
 public:
  JitGcTask() {}

  void Run(Thread* self) override {
    Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
  }

  void Finalize() override {
    delete this;
  }
};

void JitCodeCache::AddZombieCodeInternal(ArtMethod* method, const void* code_ptr) {
  if (method->IsNative()) {
    CHECK(jni_stubs_map_.find(JniStubKey(method)) != jni_stubs_map_.end());
    zombie_jni_code_.insert(method);
  } else {
    CHECK(!ContainsElement(zombie_code_, code_ptr));
    zombie_code_.insert(code_ptr);
  }
  // Arbitrary threshold on the number of zombie code entries before doing a GC.
  static constexpr size_t kNumberOfZombieCodeThreshold = kIsDebugBuild ? 1 : 1000;
  size_t number_of_code_to_delete =
      zombie_code_.size() + zombie_jni_code_.size() + osr_code_map_.size();
  if (number_of_code_to_delete >= kNumberOfZombieCodeThreshold) {
    JitThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
    if (pool != nullptr && !gc_task_scheduled_) {
      gc_task_scheduled_ = true;
      pool->AddTask(Thread::Current(), new JitGcTask());
    }
  }
}

bool JitCodeCache::GetGarbageCollectCode() {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  return garbage_collect_code_;
}

void JitCodeCache::SetGarbageCollectCode(bool value) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::jit_lock_);
  // Update the flag while holding the lock to ensure no thread will try to GC.
  garbage_collect_code_ = value;
}

ProfilingInfo* JitCodeCache::GetProfilingInfo(ArtMethod* method, Thread* self) {
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  auto it = profiling_infos_.find(method);
  if (it == profiling_infos_.end()) {
    return nullptr;
  }
  return it->second;
}

void JitCodeCache::MaybeUpdateInlineCache(ArtMethod* method,
                                          uint32_t dex_pc,
                                          ObjPtr<mirror::Class> cls,
                                          Thread* self) {
  ScopedDebugDisallowReadBarriers sddrb(self);
  MutexLock mu(self, *Locks::jit_lock_);
  auto it = profiling_infos_.find(method);
  if (it == profiling_infos_.end()) {
    return;
  }
  ProfilingInfo* info = it->second;
  ScopedAssertNoThreadSuspension sants("ProfilingInfo");
  info->AddInvokeInfo(dex_pc, cls.Ptr());
}

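// Performs a code cache collection: snapshots the current zombie code, runs a
// checkpoint marking code still live on thread stacks, then frees unmarked zombies.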
DoCollection(Thread * self)1237 void JitCodeCache::DoCollection(Thread* self) {
1238   ScopedTrace trace(__FUNCTION__);
1239 
1240   {
1241     ScopedDebugDisallowReadBarriers sddrb(self);
1242     MutexLock mu(self, *Locks::jit_lock_);
1243     if (!garbage_collect_code_) {
1244       return;
1245     } else if (WaitForPotentialCollectionToComplete(self)) {
1246       return;
1247     }
1248     collection_in_progress_ = true;
1249     number_of_collections_++;
1250     live_bitmap_.reset(CodeCacheBitmap::Create(
1251           "code-cache-bitmap",
1252           reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
1253           reinterpret_cast<uintptr_t>(
1254               private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
1255     processed_zombie_code_.insert(zombie_code_.begin(), zombie_code_.end());
1256     zombie_code_.clear();
1257     processed_zombie_jni_code_.insert(zombie_jni_code_.begin(), zombie_jni_code_.end());
1258     zombie_jni_code_.clear();
1259     // Empty osr method map, as osr compiled code will be deleted (except the ones
1260     // on thread stacks).
1261     for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
1262       processed_zombie_code_.insert(it->second);
1263     }
1264     osr_code_map_.clear();
1265   }
1266   TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
1267   {
1268     TimingLogger::ScopedTiming st("Code cache collection", &logger);
1269 
1270     {
1271       ScopedObjectAccess soa(self);
1272       // Run a checkpoint on all threads to mark the JIT compiled code they are running.
1273       MarkCompiledCodeOnThreadStacks(self);
1274 
1275       // Remove zombie code which hasn't been marked.
1276       RemoveUnmarkedCode(self);
1277     }
1278 
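    // All unmarked zombie code has been freed at this point; drop the bitmap
    // and wake threads blocked in WaitForPotentialCollectionToComplete().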
1279     MutexLock mu(self, *Locks::jit_lock_);
1280     live_bitmap_.reset(nullptr);
1281     NotifyCollectionDone(self);
1282   }
1283 
1284   Runtime::Current()->GetJit()->AddTimingLogger(logger);
1285 }
1286 
1287 void JitCodeCache::NotifyCollectionDone(Thread* self) {
1288   collection_in_progress_ = false;
1289   gc_task_scheduled_ = false;
1290   lock_cond_.Broadcast(self);
1291 }
1292 
1293 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
1294   static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
1295   const void* pc_ptr = reinterpret_cast<const void*>(pc);
1296   if (!ContainsPc(pc_ptr)) {
1297     return nullptr;
1298   }
1299 
1300   if (!kIsDebugBuild) {
1301     // Called with null `method` only from MarkCodeClosure::Run() in debug build.
1302     CHECK(method != nullptr);
1303   }
1304 
1305   Thread* self = Thread::Current();
1306   ScopedDebugDisallowReadBarriers sddrb(self);
1307   MutexLock mu(self, *Locks::jit_lock_);
1308   OatQuickMethodHeader* method_header = nullptr;
1309   ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
1310   if (method != nullptr && UNLIKELY(method->IsNative())) {
1311     auto it = jni_stubs_map_.find(JniStubKey(method));
1312     if (it == jni_stubs_map_.end()) {
1313       return nullptr;
1314     }
1315     if (!ContainsElement(it->second.GetMethods(), method)) {
1316       DCHECK(!OatQuickMethodHeader::FromCodePointer(it->second.GetCode())->Contains(pc))
1317           << "Method missing from stub map, but pc executing the method points to the stub."
1318           << " method= " << method->PrettyMethod()
1319           << " pc= " << std::hex << pc;
1320       return nullptr;
1321     }
1322     const void* code_ptr = it->second.GetCode();
1323     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1324     if (!method_header->Contains(pc)) {
1325       return nullptr;
1326     }
1327   } else {
1328     if (shared_region_.IsInExecSpace(pc_ptr)) {
1329       const void* code_ptr = zygote_map_.GetCodeFor(method, pc);
1330       if (code_ptr != nullptr) {
1331         return OatQuickMethodHeader::FromCodePointer(code_ptr);
1332       }
1333     }
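    // `method_code_map_` is keyed by the start address of the compiled code.
    // lower_bound() yields the first entry at or above `pc`; on an inexact
    // match, the candidate containing `pc` is the previous entry, so step back
    // before checking the header's bounds.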
1334     auto it = method_code_map_.lower_bound(pc_ptr);
1335     if ((it == method_code_map_.end() || it->first != pc_ptr) &&
1336         it != method_code_map_.begin()) {
1337       --it;
1338     }
1339     if (it != method_code_map_.end()) {
1340       const void* code_ptr = it->first;
1341       if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
1342         method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1343         found_method = it->second;
1344       }
1345     }
1346     if (method_header == nullptr && method == nullptr) {
1347       // Scan all compiled JNI stubs as well. This slow search is used only
1348       // for checks in debug builds; in release builds `method` is never null.
1349       for (auto&& entry : jni_stubs_map_) {
1350         const JniStubData& data = entry.second;
1351         if (data.IsCompiled() &&
1352             OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
1353           method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
1354         }
1355       }
1356     }
1357     if (method_header == nullptr) {
1358       return nullptr;
1359     }
1360   }
1361 
1362   if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
1363     DCHECK_EQ(found_method, method)
1364         << ArtMethod::PrettyMethod(method) << " "
1365         << ArtMethod::PrettyMethod(found_method) << " "
1366         << std::hex << pc;
1367   }
1368   return method_header;
1369 }
1370 
1371 OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
1372   Thread* self = Thread::Current();
1373   ScopedDebugDisallowReadBarriers sddrb(self);
1374   MutexLock mu(self, *Locks::jit_lock_);
1375   auto it = osr_code_map_.find(method);
1376   if (it == osr_code_map_.end()) {
1377     return nullptr;
1378   }
1379   return OatQuickMethodHeader::FromCodePointer(it->second);
1380 }
1381 
1382 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
1383                                               ArtMethod* method,
1384                                               const std::vector<uint32_t>& inline_cache_entries,
1385                                               const std::vector<uint32_t>& branch_cache_entries) {
1386   DCHECK(CanAllocateProfilingInfo());
1387   ProfilingInfo* info = nullptr;
1388   {
1389     MutexLock mu(self, *Locks::jit_lock_);
1390     info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
1391   }
1392 
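  // The allocation can fail if the data region is full. Grow the cache and
  // retry once; callers must tolerate a null result if that fails too.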
1393   if (info == nullptr) {
1394     IncreaseCodeCacheCapacity(self);
1395     MutexLock mu(self, *Locks::jit_lock_);
1396     info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
1397   }
1398   return info;
1399 }
1400 
1401 ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(
1402     Thread* self,
1403     ArtMethod* method,
1404     const std::vector<uint32_t>& inline_cache_entries,
1405     const std::vector<uint32_t>& branch_cache_entries) {
1406   ScopedDebugDisallowReadBarriers sddrb(self);
1407   // Check whether some other thread has concurrently created it.
1408   auto it = profiling_infos_.find(method);
1409   if (it != profiling_infos_.end()) {
1410     return it->second;
1411   }
1412 
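  // Placement-new the ProfilingInfo into the region's data space. The address
  // returned by AllocateData() is not directly writable (the data pages may be
  // dual-mapped), so the writable alias is obtained before construction.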
1413   size_t profile_info_size =
1414       ProfilingInfo::ComputeSize(inline_cache_entries.size(), branch_cache_entries.size());
1415 
1416   const uint8_t* data = private_region_.AllocateData(profile_info_size);
1417   if (data == nullptr) {
1418     return nullptr;
1419   }
1420   uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
1421   ProfilingInfo* info =
1422       new (writable_data) ProfilingInfo(method, inline_cache_entries, branch_cache_entries);
1423 
1424   profiling_infos_.Put(method, info);
1425   histogram_profiling_info_memory_use_.AddValue(profile_info_size);
1426   return info;
1427 }
1428 
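// Callback for dlmalloc (see gc/allocator/art-dlmalloc.h): when an mspace
// needs more backing memory, route the request to whichever region owns it.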
1429 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
1430   return shared_region_.OwnsSpace(mspace)
1431       ? shared_region_.MoreCore(mspace, increment)
1432       : private_region_.MoreCore(mspace, increment);
1433 }
1434 
1435 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
1436                                       std::vector<ProfileMethodInfo>& methods,
1437                                       uint16_t inline_cache_threshold) {
1438   ScopedTrace trace(__FUNCTION__);
1439   Thread* self = Thread::Current();
1440 
1441   // Preserve class loaders to prevent unloading while we're processing
1442   // ArtMethods.
1443   VariableSizedHandleScope handles(self);
1444   Runtime::Current()->GetClassLinker()->GetClassLoaders(self, &handles);
1445 
1446   // Wait for any GC to be complete, to prevent looking at ArtMethods whose
1447   // class loader is being deleted.
1448   Runtime::Current()->GetHeap()->WaitForGcToComplete(gc::kGcCauseProfileSaver, self);
1449 
1450   // We'll be looking at inline caches, so ensure they are accessible.
1451   WaitUntilInlineCacheAccessible(self);
1452 
1453   SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos;
1454   std::vector<ArtMethod*> copies;
1455   {
1456     MutexLock mu(self, *Locks::jit_lock_);
1457     profiling_infos = profiling_infos_;
1458     for (const auto& entry : method_code_map_) {
1459       copies.push_back(entry.second);
1460     }
1461   }
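  // Work on the snapshots taken above, so that jit_lock_ is not held while
  // inspecting dex files and inline caches.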
1462   for (ArtMethod* method : copies) {
1463     auto it = profiling_infos.find(method);
1464     ProfilingInfo* info = (it == profiling_infos.end()) ? nullptr : it->second;
1465     const DexFile* dex_file = method->GetDexFile();
1466     const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
1467     if (!ContainsElement(dex_base_locations, base_location)) {
1468       // Skip dex files which are not profiled.
1469       continue;
1470     }
1471     std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
1472 
1473     if (info != nullptr) {
1474       // If the method is still baseline compiled and has not met the inline cache
1475       // threshold, don't save its inline caches: they might be incomplete.
1476       // Although incomplete inline caches do not cause deoptimization in AOT-compiled
1477       // code, inlining based on them still leads to larger generated code.
1478       // If an inline cache is empty, the compiler generates a regular invoke-virtual/interface.
1479       const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
1480       if (ContainsPc(entry_point) &&
1481           CodeInfo::IsBaseline(
1482               OatQuickMethodHeader::FromEntryPoint(entry_point)->GetOptimizedCodeInfoPtr()) &&
1483           (ProfilingInfo::GetOptimizeThreshold() - info->GetBaselineHotnessCount()) <
1484               inline_cache_threshold) {
1485         methods.emplace_back(/*ProfileMethodInfo*/
1486             MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1487         continue;
1488       }
1489 
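      // Walk the recorded inline caches, translating each cached receiver
      // class into a (dex file, type index) pair that the profile can store.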
1490       for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
1491         std::vector<TypeReference> profile_classes;
1492         const InlineCache& cache = info->GetInlineCaches()[i];
1493         ArtMethod* caller = info->GetMethod();
1494         bool is_missing_types = false;
1495         for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
1496           mirror::Class* cls = cache.classes_[k].Read();
1497           if (cls == nullptr) {
1498             break;
1499           }
1500 
1501           // Check if the receiver is in the boot class path or if it's in the
1502           // same class loader as the caller. If not, skip it, as there is not
1503           // much we can do during AOT.
1504           if (!cls->IsBootStrapClassLoaded() &&
1505               caller->GetClassLoader() != cls->GetClassLoader()) {
1506             is_missing_types = true;
1507             continue;
1508           }
1509 
1510           const DexFile* class_dex_file = nullptr;
1511           dex::TypeIndex type_index;
1512 
1513           if (cls->GetDexCache() == nullptr) {
1514             DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
1515             // Make a best effort to find the type index in the method's dex file.
1516             // We could search all open dex files, but that could be expensive
1517             // and is probably not worth it.
1518             class_dex_file = dex_file;
1519             type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
1520           } else {
1521             class_dex_file = &(cls->GetDexFile());
1522             type_index = cls->GetDexTypeIndex();
1523           }
1524           if (!type_index.IsValid()) {
1525             // Could be a proxy class or an array for which we couldn't find the type index.
1526             is_missing_types = true;
1527             continue;
1528           }
1529           if (ContainsElement(dex_base_locations,
1530                               DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
1531             // Only consider classes from the same apk (including multidex).
1532             profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
1533                 class_dex_file, type_index);
1534           } else {
1535             is_missing_types = true;
1536           }
1537         }
1538         if (!profile_classes.empty()) {
1539           inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
1540               cache.dex_pc_, is_missing_types, profile_classes);
1541         }
1542       }
1543     }
1544     methods.emplace_back(/*ProfileMethodInfo*/
1545         MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1546   }
1547 }
1548 
1549 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
1550   Thread* self = Thread::Current();
1551   ScopedDebugDisallowReadBarriers sddrb(self);
1552   MutexLock mu(self, *Locks::jit_lock_);
1553   return osr_code_map_.find(method) != osr_code_map_.end();
1554 }
1555 
1556 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
1557                                        Thread* self,
1558                                        CompilationKind compilation_kind,
1559                                        bool prejit) {
1560   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
1561   if (compilation_kind == CompilationKind::kBaseline && ContainsPc(existing_entry_point)) {
1562     // The existing entry point is either already baseline, or optimized. No
1563     // need to compile.
1564     VLOG(jit) << "Not compiling "
1565               << method->PrettyMethod()
1566               << " baseline, because it has already been compiled";
1567     return false;
1568   }
1569 
1570   if (method->NeedsClinitCheckBeforeCall() && !prejit) {
1571     // We do not need a synchronization barrier for checking the visibly initialized status
1572     // or checking the initialized status just for requesting visible initialization.
1573     ClassStatus status = method->GetDeclaringClass()
1574         ->GetStatus<kDefaultVerifyFlags, /*kWithSynchronizationBarrier=*/ false>();
1575     if (status != ClassStatus::kVisiblyInitialized) {
1576       // Unless we're pre-jitting, we currently don't save the JIT compiled code if we cannot
1577       // update the entrypoint due to needing an initialization check.
1578       if (status == ClassStatus::kInitialized) {
1579         // Request visible initialization but do not block to allow compiling other methods.
1580         // Hopefully, this will complete by the time the method becomes hot again.
1581         Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
1582             self, /*wait=*/ false);
1583       }
1584       VLOG(jit) << "Not compiling "
1585                 << method->PrettyMethod()
1586                 << " because it has the resolution stub";
1587       return false;
1588     }
1589   }
1590 
1591   ScopedDebugDisallowReadBarriers sddrb(self);
1592   if (compilation_kind == CompilationKind::kOsr) {
1593     MutexLock mu(self, *Locks::jit_lock_);
1594     if (osr_code_map_.find(method) != osr_code_map_.end()) {
1595       return false;
1596     }
1597   }
1598 
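  // JNI stubs are shared between native methods with the same JniStubKey
  // (shorty plus the relevant access flags), so this method may find a stub
  // that another method's compilation has already created or requested.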
1599   if (UNLIKELY(method->IsNative())) {
1600     MutexLock mu(self, *Locks::jit_lock_);
1601     JniStubKey key(method);
1602     auto it = jni_stubs_map_.find(key);
1603     bool new_compilation = false;
1604     if (it == jni_stubs_map_.end()) {
1605       // Create a new entry to mark the stub as being compiled.
1606       it = jni_stubs_map_.Put(key, JniStubData{});
1607       new_compilation = true;
1608     }
1609     JniStubData* data = &it->second;
1610     data->AddMethod(method);
1611     if (data->IsCompiled()) {
1612       OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
1613       const void* entrypoint = method_header->GetEntryPoint();
1614       // Also update the entrypoints of the other methods held by the JniStubData.
1615       // We could update only the entrypoint of `method`, but if the last JIT GC
1616       // switched these entrypoints to GenericJNI in preparation for a full GC, we
1617       // may as well switch them back: this stub will not be collected anyway, and
1618       // doing so avoids a few expensive GenericJNI calls.
1619       data->UpdateEntryPoints(entrypoint);
1620     }
1621     return new_compilation;
1622   } else {
1623     if (compilation_kind == CompilationKind::kBaseline) {
1624       DCHECK(CanAllocateProfilingInfo());
1625     }
1626   }
1627   return true;
1628 }
1629 
1630 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
1631   ScopedDebugDisallowReadBarriers sddrb(self);
1632   MutexLock mu(self, *Locks::jit_lock_);
1633   auto it = profiling_infos_.find(method);
1634   if (it == profiling_infos_.end()) {
1635     return nullptr;
1636   }
1637   if (!it->second->IncrementInlineUse()) {
1638     // Overflow of inlining uses, just bail.
1639     return nullptr;
1640   }
1641   return it->second;
1642 }
1643 
1644 void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
1645   ScopedDebugDisallowReadBarriers sddrb(self);
1646   MutexLock mu(self, *Locks::jit_lock_);
1647   auto it = profiling_infos_.find(method);
1648   DCHECK(it != profiling_infos_.end());
1649   it->second->DecrementInlineUse();
1650 }
1651 
1652 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
1653   DCHECK_EQ(Thread::Current(), self);
1654   ScopedDebugDisallowReadBarriers sddrb(self);
1655   MutexLock mu(self, *Locks::jit_lock_);
1656   if (UNLIKELY(method->IsNative())) {
1657     auto it = jni_stubs_map_.find(JniStubKey(method));
1658     DCHECK(it != jni_stubs_map_.end());
1659     JniStubData* data = &it->second;
1660     DCHECK(ContainsElement(data->GetMethods(), method));
1661     if (UNLIKELY(!data->IsCompiled())) {
1662       // Failed to compile; the JNI compiler never fails, but the cache may be full.
1663       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
1664     }  // else Commit() updated entrypoints of all methods in the JniStubData.
1665   }
1666 }
1667 
1668 void JitCodeCache::InvalidateAllCompiledCode() {
1669   Thread* self = Thread::Current();
1670   ScopedDebugDisallowReadBarriers sddrb(self);
1671   art::MutexLock mu(self, *Locks::jit_lock_);
1672   VLOG(jit) << "Invalidating all compiled code";
1673   Runtime* runtime = Runtime::Current();
1674   ClassLinker* linker = runtime->GetClassLinker();
1675   instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
1676 
1677   // Change entry points of native methods back to the GenericJNI entrypoint.
1678   for (const auto& entry : jni_stubs_map_) {
1679     const JniStubData& data = entry.second;
1680     if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
1681       continue;
1682     }
1683     const OatQuickMethodHeader* method_header =
1684         OatQuickMethodHeader::FromCodePointer(data.GetCode());
1685     for (ArtMethod* method : data.GetMethods()) {
1686       if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
1687         instr->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
1688       }
1689     }
1690   }
1691   for (const auto& entry : method_code_map_) {
1692     ArtMethod* meth = entry.second;
1693     if (UNLIKELY(meth->IsObsolete())) {
1694       linker->SetEntryPointsForObsoleteMethod(meth);
1695     } else {
1696       instr->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
1697     }
1698   }
1699 
1700   for (const auto& entry : zygote_map_) {
1701     if (entry.method == nullptr) {
1702       continue;
1703     }
1704     if (entry.method->IsPreCompiled()) {
1705       entry.method->ClearPreCompiled();
1706     }
1707     instr->InitializeMethodsCode(entry.method, /*aot_code=*/nullptr);
1708   }
1709 
1710   saved_compiled_methods_map_.clear();
1711   osr_code_map_.clear();
1712 }
1713 
1714 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
1715                                              const OatQuickMethodHeader* header) {
1716   DCHECK(!method->IsNative());
1717   const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
1718 
1719   // Clear the method counter if we are running jitted code since we might want to jit this again in
1720   // the future.
1721   if (method_entrypoint == header->GetEntryPoint()) {
1722     // The entrypoint is the one to invalidate, so we just update it to the interpreter entry point.
1723     Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
1724   } else {
1725     Thread* self = Thread::Current();
1726     ScopedDebugDisallowReadBarriers sddrb(self);
1727     MutexLock mu(self, *Locks::jit_lock_);
1728     auto it = osr_code_map_.find(method);
1729     if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
1730       // Remove the OSR method, to avoid using it again.
1731       osr_code_map_.erase(it);
1732     }
1733   }
1734 
1735   // In case the method was pre-compiled, clear that information so we
1736   // can recompile it ourselves.
1737   if (method->IsPreCompiled()) {
1738     method->ClearPreCompiled();
1739   }
1740 }
1741 
1742 void JitCodeCache::Dump(std::ostream& os) {
1743   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1744   os << "Current JIT code cache size (used / resident): "
1745      << GetCurrentRegion()->GetUsedMemoryForCode() / KB << "KB / "
1746      << GetCurrentRegion()->GetResidentMemoryForCode() / KB << "KB\n"
1747      << "Current JIT data cache size (used / resident): "
1748      << GetCurrentRegion()->GetUsedMemoryForData() / KB << "KB / "
1749      << GetCurrentRegion()->GetResidentMemoryForData() / KB << "KB\n";
1750   if (!Runtime::Current()->IsZygote()) {
1751     os << "Zygote JIT code cache size (at point of fork): "
1752        << shared_region_.GetUsedMemoryForCode() / KB << "KB / "
1753        << shared_region_.GetResidentMemoryForCode() / KB << "KB\n"
1754        << "Zygote JIT data cache size (at point of fork): "
1755        << shared_region_.GetUsedMemoryForData() / KB << "KB / "
1756        << shared_region_.GetResidentMemoryForData() / KB << "KB\n";
1757   }
1758   os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
1759      << "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
1760      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
1761      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
1762      << "Total number of JIT baseline compilations: " << number_of_baseline_compilations_ << "\n"
1763      << "Total number of JIT optimized compilations: " << number_of_optimized_compilations_ << "\n"
1764      << "Total number of JIT compilations for on stack replacement: "
1765         << number_of_osr_compilations_ << "\n"
1766      << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
1767   histogram_stack_map_memory_use_.PrintMemoryUse(os);
1768   histogram_code_memory_use_.PrintMemoryUse(os);
1769   histogram_profiling_info_memory_use_.PrintMemoryUse(os);
1770 }
1771 
1772 void JitCodeCache::DumpAllCompiledMethods(std::ostream& os) {
1773   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1774   for (const auto& [code_ptr, meth] : method_code_map_) {  // Includes OSR methods.
1775     OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1776     os << meth->PrettyMethod() << "@"  << std::hex
1777        << code_ptr << "-" << reinterpret_cast<uintptr_t>(code_ptr) + header->GetCodeSize() << '\n';
1778   }
1779   os << "JNIStubs: \n";
1780   for (const auto& [_, data] : jni_stubs_map_) {
1781     const void* code_ptr = data.GetCode();
1782     if (code_ptr == nullptr) {
1783       continue;
1784     }
1785     OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1786     os << std::hex << code_ptr << "-"
1787        << reinterpret_cast<uintptr_t>(code_ptr) + header->GetCodeSize() << " ";
1788     for (ArtMethod* m : data.GetMethods()) {
1789       os << m->PrettyMethod() << ";";
1790     }
1791     os << "\n";
1792   }
1793 }
1794 
1795 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
1796   Thread* self = Thread::Current();
1797 
1798   // Remove potential tasks that have been inherited from the zygote.
1799   // We do this now and not in Jit::PostForkChildAction, as system server calls
1800   // JitCodeCache::PostForkChildAction first, and then does some code loading
1801   // that may result in new JIT tasks that we want to keep.
1802   Runtime* runtime = Runtime::Current();
1803   JitThreadPool* pool = runtime->GetJit()->GetThreadPool();
1804   if (pool != nullptr) {
1805     pool->RemoveAllTasks(self);
1806   }
1807 
1808   MutexLock mu(self, *Locks::jit_lock_);
1809 
1810   // Reset potential writable MemMaps inherited from the zygote. We never want
1811   // to write to them.
1812   shared_region_.ResetWritableMappings();
1813 
1814   if (is_zygote || runtime->IsSafeMode()) {
1815     // Don't create a private region for a child zygote. Regions are usually map shared
1816     // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
1817     return;
1818   }
1819 
1820   // Reset all statistics to be specific to this process.
1821   number_of_baseline_compilations_ = 0;
1822   number_of_optimized_compilations_ = 0;
1823   number_of_osr_compilations_ = 0;
1824   number_of_collections_ = 0;
1825   histogram_stack_map_memory_use_.Reset();
1826   histogram_code_memory_use_.Reset();
1827   histogram_profiling_info_memory_use_.Reset();
1828 
1829   size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
1830   size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
1831   std::string error_msg;
1832   if (!private_region_.Initialize(initial_capacity,
1833                                   max_capacity,
1834                                   /* rwx_memory_allowed= */ !is_system_server,
1835                                   is_zygote,
1836                                   &error_msg)) {
1837     LOG(FATAL) << "Could not create private region after zygote fork: " << error_msg;
1838   }
1839   if (private_region_.HasCodeMapping()) {
1840     const MemMap* exec_pages = private_region_.GetExecPages();
1841     runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
1842   }
1843 }
1844 
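// While in the zygote, compiled code goes to the shared region so that forked
// children inherit it; all other processes use their private region.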
1845 JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
1846   return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
1847 }
1848 
1849 void JitCodeCache::VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb) {
1850   for (const auto& it : jni_stubs_map_) {
1851     const JniStubData& data = it.second;
1852     if (data.IsCompiled()) {
1853       for (ArtMethod* method : data.GetMethods()) {
1854         cb(data.GetCode(), method);
1855       }
1856     }
1857   }
1858   for (const auto& it : method_code_map_) {  // Includes OSR methods.
1859     cb(it.first, it.second);
1860   }
1861   for (const auto& it : saved_compiled_methods_map_) {
1862     cb(it.second, it.first);
1863   }
1864   for (const auto& it : zygote_map_) {
1865     if (it.code_ptr != nullptr && it.method != nullptr) {
1866       cb(it.code_ptr, it.method);
1867     }
1868   }
1869 }
1870 
1871 void ZygoteMap::Initialize(uint32_t number_of_methods) {
1872   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1873   // Allocate for a 40-80% load factor. This offers reasonable lookup times and
1874   // guarantees that the probing loops below terminate (a free slot always exists).
1875   size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
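  // Illustrative example (numbers not from the source): 1000 methods request a
  // capacity of 1000 * 100 / 80 = 1250, rounded up to 2048, i.e. a load factor
  // of roughly 49%. A power-of-two capacity also lets lookups reduce the hash
  // with `hash & (capacity - 1)` instead of a modulo.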
1876   const uint8_t* memory = region_->AllocateData(
1877       capacity * sizeof(Entry) + sizeof(ZygoteCompilationState));
1878   if (memory == nullptr) {
1879     LOG(WARNING) << "Could not allocate data for the zygote map";
1880     return;
1881   }
1882   const Entry* data = reinterpret_cast<const Entry*>(memory);
1883   region_->FillData(data, capacity, Entry { nullptr, nullptr });
1884   map_ = ArrayRef(data, capacity);
1885   compilation_state_ = reinterpret_cast<const ZygoteCompilationState*>(
1886       memory + capacity * sizeof(Entry));
1887   region_->WriteData(compilation_state_, ZygoteCompilationState::kInProgress);
1888 }
1889 
1890 const void* ZygoteMap::GetCodeFor(ArtMethod* method, uintptr_t pc) const {
1891   if (map_.empty()) {
1892     return nullptr;
1893   }
1894 
1895   if (method == nullptr) {
1896     // Do a linear search. This should only be used in debug builds.
1897     CHECK(kIsDebugBuild);
1898     for (const Entry& entry : map_) {
1899       const void* code_ptr = entry.code_ptr;
1900       if (code_ptr != nullptr) {
1901         OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1902         if (method_header->Contains(pc)) {
1903           return code_ptr;
1904         }
1905       }
1906     }
1907     return nullptr;
1908   }
1909 
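  // Open-addressing lookup with linear probing; since the capacity is a power
  // of two, masking with (map_.size() - 1) is equivalent to a modulo.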
1910   std::hash<ArtMethod*> hf;
1911   size_t index = hf(method) & (map_.size() - 1u);
1912   size_t original_index = index;
1913   // Loop over the array: we know this loop terminates as we will either
1914   // encounter the given method, or a null entry. Both terminate the loop.
1915   // Note that the zygote may concurrently write new entries to the map. That's OK as the
1916   // map is never resized.
1917   while (true) {
1918     const Entry& entry = map_[index];
1919     if (entry.method == nullptr) {
1920       // Not compiled yet.
1921       return nullptr;
1922     }
1923     if (entry.method == method) {
1924       if (entry.code_ptr == nullptr) {
1925         // This is a race with the zygote which wrote the method, but hasn't written the
1926         // code. Just bail and wait for the next time we need the method.
1927         return nullptr;
1928       }
1929       if (pc != 0 && !OatQuickMethodHeader::FromCodePointer(entry.code_ptr)->Contains(pc)) {
1930         return nullptr;
1931       }
1932       return entry.code_ptr;
1933     }
1934     index = (index + 1) & (map_.size() - 1);
1935     DCHECK_NE(original_index, index);
1936   }
1937 }
1938 
1939 void ZygoteMap::Put(const void* code, ArtMethod* method) {
1940   if (map_.empty()) {
1941     return;
1942   }
1943   CHECK(Runtime::Current()->IsZygote());
1944   std::hash<ArtMethod*> hf;
1945   size_t index = hf(method) & (map_.size() - 1);
1946   size_t original_index = index;
1947   // Because the map is larger than the number of methods that will be added,
1948   // we are guaranteed to find a free slot in the array, and therefore this
1949   // loop terminates.
1950   while (true) {
1951     const Entry* entry = &map_[index];
1952     if (entry->method == nullptr) {
1953       // Note that readers can read this memory concurrently, but that's OK:
1954       // the entry's pointer-sized fields are written with single stores.
1955       region_->WriteData(entry, Entry { method, code });
1956       break;
1957     }
1958     index = (index + 1) & (map_.size() - 1);
1959     DCHECK_NE(original_index, index);
1960   }
1961   DCHECK_EQ(GetCodeFor(method), code);
1962 }
1963 
1964 }  // namespace jit
1965 }  // namespace art
1966