1 /*
2  * Copyright 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "jit_code_cache.h"
18 
19 #include <sstream>
20 
21 #include <android-base/logging.h>
22 
23 #include "arch/context.h"
24 #include "art_method-inl.h"
25 #include "base/enums.h"
26 #include "base/histogram-inl.h"
27 #include "base/logging.h"  // For VLOG.
28 #include "base/membarrier.h"
29 #include "base/memfd.h"
30 #include "base/mem_map.h"
31 #include "base/quasi_atomic.h"
32 #include "base/stl_util.h"
33 #include "base/systrace.h"
34 #include "base/time_utils.h"
35 #include "base/utils.h"
36 #include "cha.h"
37 #include "debugger_interface.h"
38 #include "dex/dex_file_loader.h"
39 #include "dex/method_reference.h"
40 #include "entrypoints/entrypoint_utils-inl.h"
41 #include "entrypoints/runtime_asm_entrypoints.h"
42 #include "gc/accounting/bitmap-inl.h"
43 #include "gc/allocator/art-dlmalloc.h"
44 #include "gc/scoped_gc_critical_section.h"
45 #include "handle.h"
46 #include "handle_scope-inl.h"
47 #include "instrumentation.h"
48 #include "intern_table.h"
49 #include "jit/jit.h"
50 #include "jit/profiling_info.h"
51 #include "jit/jit_scoped_code_cache_write.h"
52 #include "linear_alloc.h"
53 #include "oat_file-inl.h"
54 #include "oat_quick_method_header.h"
55 #include "object_callbacks.h"
56 #include "profile/profile_compilation_info.h"
57 #include "scoped_thread_state_change-inl.h"
58 #include "stack.h"
59 #include "thread-current-inl.h"
60 #include "thread-inl.h"
61 #include "thread_list.h"
62 
63 namespace art {
64 namespace jit {
65 
66 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
67 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
68 
69 class JitCodeCache::JniStubKey {
70  public:
71   explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
72       : shorty_(method->GetShorty()),
73         is_static_(method->IsStatic()),
74         is_fast_native_(method->IsFastNative()),
75         is_critical_native_(method->IsCriticalNative()),
76         is_synchronized_(method->IsSynchronized()) {
77     DCHECK(!(is_fast_native_ && is_critical_native_));
78   }
79 
80   bool operator<(const JniStubKey& rhs) const {
81     if (is_static_ != rhs.is_static_) {
82       return rhs.is_static_;
83     }
84     if (is_synchronized_ != rhs.is_synchronized_) {
85       return rhs.is_synchronized_;
86     }
87     if (is_fast_native_ != rhs.is_fast_native_) {
88       return rhs.is_fast_native_;
89     }
90     if (is_critical_native_ != rhs.is_critical_native_) {
91       return rhs.is_critical_native_;
92     }
93     return strcmp(shorty_, rhs.shorty_) < 0;
94   }
95 
96   // Update the shorty to point to another method's shorty. Call this function when removing
97   // the method that references the old shorty from JniStubData without removing the entire
98   // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
99   void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
100     const char* shorty = method->GetShorty();
101     DCHECK_STREQ(shorty_, shorty);
102     shorty_ = shorty;
103   }
104 
105  private:
106   // The shorty points to DexFile data and may need to change
107   // to point to the same shorty in a different DexFile.
108   mutable const char* shorty_;
109 
110   const bool is_static_;
111   const bool is_fast_native_;
112   const bool is_critical_native_;
113   const bool is_synchronized_;
114 };
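// Note: JniStubKey keys only on the properties that affect how a JNI stub is compiled
// (shorty, static, fast-native, critical-native, synchronized), not on the method
// identity. Native methods with equal keys can therefore share one compiled stub;
// JniStubData below tracks which methods currently share it.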
115 
116 class JitCodeCache::JniStubData {
117  public:
118   JniStubData() : code_(nullptr), methods_() {}
119 
120   void SetCode(const void* code) {
121     DCHECK(code != nullptr);
122     code_ = code;
123   }
124 
125   void UpdateEntryPoints(const void* entrypoint) REQUIRES_SHARED(Locks::mutator_lock_) {
126     DCHECK(IsCompiled());
127     DCHECK(entrypoint == OatQuickMethodHeader::FromCodePointer(GetCode())->GetEntryPoint());
128     instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
129     for (ArtMethod* m : GetMethods()) {
130       // Because `m` might be in the process of being deleted,
131       //   - use the `ArtMethod::StillNeedsClinitCheckMayBeDead()` to check if
132       //     we can update the entrypoint, and
133       //   - call `Instrumentation::UpdateNativeMethodsCodeToJitCode` instead of the
134       //     more generic function `Instrumentation::UpdateMethodsCode()`.
135       // The `ArtMethod::StillNeedsClinitCheckMayBeDead()` checks the class status
136       // in the to-space object (if any), even if the method's declaring class points to
137       // the from-space class object. This way we do not miss updating an entrypoint
138       // even under uncommon circumstances: during a GC the class becomes visibly
139       // initialized, the method becomes hot, we compile the thunk and want to update
140       // the entrypoint while the method's declaring class field still points to the
141       // from-space class object with the old status.
142       if (!m->StillNeedsClinitCheckMayBeDead()) {
143         instrum->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
144       }
145     }
146   }
147 
148   const void* GetCode() const {
149     return code_;
150   }
151 
152   bool IsCompiled() const {
153     return GetCode() != nullptr;
154   }
155 
156   void AddMethod(ArtMethod* method) {
157     if (!ContainsElement(methods_, method)) {
158       methods_.push_back(method);
159     }
160   }
161 
162   const std::vector<ArtMethod*>& GetMethods() const {
163     return methods_;
164   }
165 
166   void RemoveMethodsIn(const LinearAlloc& alloc) REQUIRES_SHARED(Locks::mutator_lock_) {
167     auto kept_end = std::partition(
168         methods_.begin(),
169         methods_.end(),
170         [&alloc](ArtMethod* method) { return !alloc.ContainsUnsafe(method); });
171     for (auto it = kept_end; it != methods_.end(); it++) {
172       VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
173     }
174     methods_.erase(kept_end, methods_.end());
175   }
176 
177   bool RemoveMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
178     auto it = std::find(methods_.begin(), methods_.end(), method);
179     if (it != methods_.end()) {
180       VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
181       methods_.erase(it);
182       return true;
183     } else {
184       return false;
185     }
186   }
187 
188   void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
189     std::replace(methods_.begin(), methods_.end(), old_method, new_method);
190   }
191 
192  private:
193   const void* code_;
194   std::vector<ArtMethod*> methods_;
195 };
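// Each JniStubData entry owns one compiled JNI stub (code_) plus the list of native
// methods currently sharing that stub. Once the last method is removed (see
// RemoveMethodsIn()/RemoveMethod()), the stub's code becomes eligible for freeing by
// the cache.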
196 
197 JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
198                                    bool rwx_memory_allowed,
199                                    bool is_zygote,
200                                    std::string* error_msg) {
201   // Register for membarrier expedited sync core if JIT will be generating code.
202   if (!used_only_for_profile_data) {
203     if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
204       // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
205       // flushed and it's used when adding code to the JIT. The memory used by the new code may
206       // have just been released and, in theory, the old code could still be in a pipeline.
207       VLOG(jit) << "Kernel does not support membarrier sync-core";
208     }
209   }
210 
211   Runtime* runtime = Runtime::Current();
212   size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
213   // Check whether the provided max capacity in options is below 1GB.
214   size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
215   // We need to have 32-bit offsets from method headers in the code cache which point to
216   // things in the data cache. If the maps are more than 4GB apart, having multiple maps
217   // wouldn't work. Ensure we're below 1 GB to be safe.
218   if (max_capacity > 1 * GB) {
219     std::ostringstream oss;
220     oss << "Maxium code cache capacity is limited to 1 GB, "
221         << PrettySize(max_capacity) << " is too big";
222     *error_msg = oss.str();
223     return nullptr;
224   }
225 
226   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
227   JitMemoryRegion region;
228   if (!region.Initialize(initial_capacity,
229                          max_capacity,
230                          rwx_memory_allowed,
231                          is_zygote,
232                          error_msg)) {
233     return nullptr;
234   }
235 
236   if (region.HasCodeMapping()) {
237     const MemMap* exec_pages = region.GetExecPages();
238     runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
239   }
240 
241   std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
242   if (is_zygote) {
243     // The zygote should never collect code, so that it can share the memory with its children.
244     jit_code_cache->garbage_collect_code_ = false;
245     jit_code_cache->shared_region_ = std::move(region);
246   } else {
247     jit_code_cache->private_region_ = std::move(region);
248   }
249 
250   VLOG(jit) << "Created jit code cache: initial capacity="
251             << PrettySize(initial_capacity)
252             << ", maximum capacity="
253             << PrettySize(max_capacity);
254 
255   return jit_code_cache.release();
256 }
257 
258 JitCodeCache::JitCodeCache()
259     : is_weak_access_enabled_(true),
260       inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
261       zygote_map_(&shared_region_),
262       lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
263       collection_in_progress_(false),
264       last_collection_increased_code_cache_(false),
265       garbage_collect_code_(true),
266       number_of_baseline_compilations_(0),
267       number_of_optimized_compilations_(0),
268       number_of_osr_compilations_(0),
269       number_of_collections_(0),
270       histogram_stack_map_memory_use_("Memory used for stack maps", 16),
271       histogram_code_memory_use_("Memory used for compiled code", 16),
272       histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
273 }
274 
275 JitCodeCache::~JitCodeCache() {
276   if (private_region_.HasCodeMapping()) {
277     const MemMap* exec_pages = private_region_.GetExecPages();
278     Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
279   }
280   if (shared_region_.HasCodeMapping()) {
281     const MemMap* exec_pages = shared_region_.GetExecPages();
282     Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
283   }
284 }
285 
286 bool JitCodeCache::PrivateRegionContainsPc(const void* ptr) const {
287   return private_region_.IsInExecSpace(ptr);
288 }
289 
290 bool JitCodeCache::ContainsPc(const void* ptr) const {
291   return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
292 }
293 
294 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
295   Thread* self = Thread::Current();
296   ScopedDebugDisallowReadBarriers sddrb(self);
297   MutexLock mu(self, *Locks::jit_lock_);
298   if (UNLIKELY(method->IsNative())) {
299     auto it = jni_stubs_map_.find(JniStubKey(method));
300     if (it != jni_stubs_map_.end() &&
301         it->second.IsCompiled() &&
302         ContainsElement(it->second.GetMethods(), method)) {
303       return true;
304     }
305   } else {
306     for (const auto& it : method_code_map_) {
307       if (it.second == method) {
308         return true;
309       }
310     }
311     if (zygote_map_.ContainsMethod(method)) {
312       return true;
313     }
314   }
315   return false;
316 }
317 
318 const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
319   DCHECK(method->IsNative());
320   Thread* self = Thread::Current();
321   ScopedDebugDisallowReadBarriers sddrb(self);
322   MutexLock mu(self, *Locks::jit_lock_);
323   auto it = jni_stubs_map_.find(JniStubKey(method));
324   if (it != jni_stubs_map_.end()) {
325     JniStubData& data = it->second;
326     if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
327       return data.GetCode();
328     }
329   }
330   return nullptr;
331 }
332 
333 const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
334   Thread* self = Thread::Current();
335   ScopedDebugDisallowReadBarriers sddrb(self);
336   if (method->IsPreCompiled()) {
337     const void* code_ptr = nullptr;
338     if (method->GetDeclaringClass<kWithoutReadBarrier>()->IsBootStrapClassLoaded()) {
339       code_ptr = zygote_map_.GetCodeFor(method);
340     } else {
341       MutexLock mu(self, *Locks::jit_lock_);
342       auto it = saved_compiled_methods_map_.find(method);
343       if (it != saved_compiled_methods_map_.end()) {
344         code_ptr = it->second;
345       }
346     }
347     if (code_ptr != nullptr) {
348       OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
349       return method_header->GetEntryPoint();
350     }
351   }
352   return nullptr;
353 }
354 
355 bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
356   bool in_collection = false;
357   while (collection_in_progress_) {
358     in_collection = true;
359     lock_cond_.Wait(self);
360   }
361   return in_collection;
362 }
363 
364 static uintptr_t FromCodeToAllocation(const void* code) {
365   size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
366   return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
367 }
368 
369 static const void* FromAllocationToCode(const uint8_t* alloc) {
370   size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
371   return reinterpret_cast<const void*>(alloc + RoundUp(sizeof(OatQuickMethodHeader), alignment));
372 }
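// A rough picture of the relationship between the two helpers above, for a single
// allocation in the executable region:
//
//   allocation -> [ padding (if needed) | OatQuickMethodHeader | compiled code ... ]
//                                                                ^-- code pointer
//
// i.e. code == allocation + RoundUp(sizeof(OatQuickMethodHeader), code alignment),
// so FromAllocationToCode() is the inverse of FromCodeToAllocation().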
373 
374 static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
375   // The length of the table is stored just before the stack map (and therefore at the end of
376   // the table itself), in order to be able to fetch it from a `stack_map` pointer.
377   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
378 }
379 
380 static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
381                                 bool is_shared_region)
382     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
383   if (!kIsDebugBuild) {
384     return;
385   }
386   // Put all roots in `roots_data`.
387   for (Handle<mirror::Object> object : roots) {
388     // Ensure the string is strongly interned. b/32995596
389     if (object->IsString()) {
390       ObjPtr<mirror::String> str = object->AsString();
391       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
392       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
393     }
394     // Ensure that we don't put movable objects in the shared region.
395     if (is_shared_region) {
396       CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
397     }
398   }
399 }
400 
401 static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
402   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
403   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
404   uint32_t roots = GetNumberOfRoots(data);
405   if (number_of_roots != nullptr) {
406     *number_of_roots = roots;
407   }
408   return data - ComputeRootTableSize(roots);
409 }
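// Layout assumed by GetNumberOfRoots()/GetRootTable() above, on the assumption that
// ComputeRootTableSize(n) accounts for both the GcRoots and the length word:
//
//   [ GcRoot<mirror::Object> x n | n (uint32_t) | CodeInfo / stack maps ... ]
//   ^-- GetRootTable(code_ptr)                   ^-- GetOptimizedCodeInfoPtr()
//
// The root count sits immediately before the CodeInfo, which is why it can be read
// with a [-1] access from the `stack_map` pointer.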
410 
411 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
412   Thread* self = Thread::Current();
413   ScopedDebugDisallowReadBarriers sddrb(self);
414   MutexLock mu(self, *Locks::jit_lock_);
415   for (const auto& entry : method_code_map_) {
416     uint32_t number_of_roots = 0;
417     const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
418     uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
419         ? private_region_.GetWritableDataAddress(root_table)
420         : shared_region_.GetWritableDataAddress(root_table);
421     GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
422     for (uint32_t i = 0; i < number_of_roots; ++i) {
423       // This does not need a read barrier because this is called by GC.
424       mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
425       if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
426         // entry got deleted in a previous sweep.
427       } else if (object->IsString<kDefaultVerifyFlags>()) {
428         mirror::Object* new_object = visitor->IsMarked(object);
429         // We know the string is marked because it's a strongly-interned string that
430         // is always alive.
431         // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
432         // out of the weak access/creation pause. b/32167580
433         DCHECK_NE(new_object, nullptr) << "old-string:" << object;
434         if (new_object != object) {
435           roots[i] = GcRoot<mirror::Object>(new_object);
436         }
437       } else {
438         mirror::Object* new_klass = visitor->IsMarked(object);
439         if (new_klass == nullptr) {
440           roots[i] = GcRoot<mirror::Object>(Runtime::GetWeakClassSentinel());
441         } else if (new_klass != object) {
442           roots[i] = GcRoot<mirror::Object>(new_klass);
443         }
444       }
445     }
446   }
447   // Walk over inline caches to clear entries containing unloaded classes.
448   for (auto it : profiling_infos_) {
449     ProfilingInfo* info = it.second;
450     for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
451       InlineCache* cache = &info->cache_[i];
452       for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
453         mirror::Class* klass = cache->classes_[j].Read<kWithoutReadBarrier>();
454         if (klass != nullptr) {
455           mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
456           if (new_klass != klass) {
457             cache->classes_[j] = GcRoot<mirror::Class>(new_klass);
458           }
459         }
460       }
461     }
462   }
463 }
464 
465 void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
466   if (IsInZygoteExecSpace(code_ptr)) {
467     // No need to free, this is shared memory.
468     return;
469   }
470   uintptr_t allocation = FromCodeToAllocation(code_ptr);
471   const uint8_t* data = nullptr;
472   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
473     data = GetRootTable(code_ptr);
474   }  // else this is a JNI stub without any data.
475 
476   FreeLocked(&private_region_, reinterpret_cast<uint8_t*>(allocation), data);
477 }
478 
479 void JitCodeCache::FreeAllMethodHeaders(
480     const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
481   // We need to remove entries in method_headers from CHA dependencies
482   // first since, once we do FreeCode() below, the memory can be reused,
483   // so it's possible for the same method_header to start representing
484   // different compiled code.
485   {
486     MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
487     Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
488         ->RemoveDependentsWithMethodHeaders(method_headers);
489   }
490 
491   ScopedCodeCacheWrite scc(private_region_);
492   for (const OatQuickMethodHeader* method_header : method_headers) {
493     FreeCodeAndData(method_header->GetCode());
494   }
495 
496   // We have potentially removed a lot of debug info. Do a maintenance pass to save space.
497   RepackNativeDebugInfoForJit();
498 
499   // Check that the set of compiled methods exactly matches native debug information.
500   // Does not check zygote methods since they can change concurrently.
501   if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
502     std::map<const void*, ArtMethod*> compiled_methods;
503     VisitAllMethods([&](const void* addr, ArtMethod* method) {
504       if (!IsInZygoteExecSpace(addr)) {
505         CHECK(addr != nullptr && method != nullptr);
506         compiled_methods.emplace(addr, method);
507       }
508     });
509     std::set<const void*> debug_info;
510     ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
511       addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
512       CHECK(debug_info.emplace(addr).second) << "Duplicate debug info: " << addr << " " << name;
513       CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
514     });
515     if (!debug_info.empty()) {  // If debug-info generation is enabled.
516       for (auto it : compiled_methods) {
517         CHECK_EQ(debug_info.count(it.first), 1u) << "No debug info: " << it.second->PrettyMethod();
518       }
519       CHECK_EQ(compiled_methods.size(), debug_info.size());
520     }
521   }
522 }
523 
524 void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
525   ScopedTrace trace(__PRETTY_FUNCTION__);
526   ScopedDebugDisallowReadBarriers sddrb(self);
527   // We use a set to first collect all method_headers whose code needs to be
528   // removed. We need to free the underlying code after we remove CHA dependencies
529   // for entries in this set. And it's more efficient to iterate through
530   // the CHA dependency map just once with an unordered_set.
531   std::unordered_set<OatQuickMethodHeader*> method_headers;
532   {
533     MutexLock mu(self, *Locks::jit_lock_);
534     // We do not check if a code cache GC is in progress, as this method comes
535     // with the classlinker_classes_lock_ held, and suspending ourselves could
536     // lead to a deadlock.
537     {
538       for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
539         it->second.RemoveMethodsIn(alloc);
540         if (it->second.GetMethods().empty()) {
541           method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
542           it = jni_stubs_map_.erase(it);
543         } else {
544           it->first.UpdateShorty(it->second.GetMethods().front());
545           ++it;
546         }
547       }
548       for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
549         if (alloc.ContainsUnsafe(it->second)) {
550           method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
551           VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
552           it = method_code_map_.erase(it);
553         } else {
554           ++it;
555         }
556       }
557     }
558     for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
559       if (alloc.ContainsUnsafe(it->first)) {
560         // Note that the code has already been pushed to method_headers in the loop
561         // above and is going to be removed in FreeCode() below.
562         it = osr_code_map_.erase(it);
563       } else {
564         ++it;
565       }
566     }
567     for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
568       ProfilingInfo* info = it->second;
569       if (alloc.ContainsUnsafe(info->GetMethod())) {
570         private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
571         it = profiling_infos_.erase(it);
572       } else {
573         ++it;
574       }
575     }
576     FreeAllMethodHeaders(method_headers);
577   }
578 }
579 
580 bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
581   return gUseReadBarrier
582       ? self->GetWeakRefAccessEnabled()
583       : is_weak_access_enabled_.load(std::memory_order_seq_cst);
584 }
585 
586 void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
587   if (IsWeakAccessEnabled(self)) {
588     return;
589   }
590   ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
591   MutexLock mu(self, *Locks::jit_lock_);
592   while (!IsWeakAccessEnabled(self)) {
593     inline_cache_cond_.Wait(self);
594   }
595 }
596 
597 void JitCodeCache::BroadcastForInlineCacheAccess() {
598   Thread* self = Thread::Current();
599   MutexLock mu(self, *Locks::jit_lock_);
600   inline_cache_cond_.Broadcast(self);
601 }
602 
603 void JitCodeCache::AllowInlineCacheAccess() {
604   DCHECK(!gUseReadBarrier);
605   is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
606   BroadcastForInlineCacheAccess();
607 }
608 
609 void JitCodeCache::DisallowInlineCacheAccess() {
610   DCHECK(!gUseReadBarrier);
611   is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
612 }
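// The functions above implement a small weak-access protocol for inline caches: the
// GC is expected to call DisallowInlineCacheAccess() before treating inline-cache
// classes as weak roots and AllowInlineCacheAccess() (which broadcasts) once they can
// be read again, while compiler threads block in WaitUntilInlineCacheAccessible() in
// between. When a read barrier collector is used, the per-thread weak-ref access flag
// is consulted instead (see IsWeakAccessEnabled()).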
613 
614 void JitCodeCache::CopyInlineCacheInto(
615     const InlineCache& ic,
616     /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
617   static_assert(arraysize(ic.classes_) == InlineCache::kIndividualCacheSize);
618   DCHECK_EQ(classes->NumberOfReferences(), InlineCache::kIndividualCacheSize);
619   DCHECK_EQ(classes->RemainingSlots(), InlineCache::kIndividualCacheSize);
620   WaitUntilInlineCacheAccessible(Thread::Current());
621   // Note that we don't need to lock `lock_` here: the compiler calling
622   // this method has already ensured the inline cache will not be deleted.
623   for (const GcRoot<mirror::Class>& root : ic.classes_) {
624     mirror::Class* object = root.Read();
625     if (object != nullptr) {
626       DCHECK_NE(classes->RemainingSlots(), 0u);
627       classes->NewHandle(object);
628     }
629   }
630 }
631 
632 static void ClearMethodCounter(ArtMethod* method, bool was_warm)
633     REQUIRES_SHARED(Locks::mutator_lock_) {
634   if (was_warm) {
635     method->SetPreviouslyWarm();
636   }
637   method->ResetCounter(Runtime::Current()->GetJITOptions()->GetWarmupThreshold());
638   // We add one sample so that the profile knows that the method was executed at least once.
639   // This is required for layout purposes.
640   method->UpdateCounter(/* new_samples= */ 1);
641 }
642 
643 void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
644   while (collection_in_progress_) {
645     Locks::jit_lock_->Unlock(self);
646     {
647       ScopedThreadSuspension sts(self, ThreadState::kSuspended);
648       MutexLock mu(self, *Locks::jit_lock_);
649       WaitForPotentialCollectionToComplete(self);
650     }
651     Locks::jit_lock_->Lock(self);
652   }
653 }
654 
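// Rough flow of Commit(): the caller has already Reserve()d code and data space. We
// copy the compiled code and the root table / stack maps into the region, register any
// CHA single-implementation dependencies under cha_lock_ (discarding the code if an
// assumption has already been invalidated), record the code in jni_stubs_map_,
// method_code_map_, zygote_map_ or osr_code_map_ as appropriate, and finally publish
// the new entry point through the instrumentation. Returning false means the code was
// not committed.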
655 bool JitCodeCache::Commit(Thread* self,
656                           JitMemoryRegion* region,
657                           ArtMethod* method,
658                           ArrayRef<const uint8_t> reserved_code,
659                           ArrayRef<const uint8_t> code,
660                           ArrayRef<const uint8_t> reserved_data,
661                           const std::vector<Handle<mirror::Object>>& roots,
662                           ArrayRef<const uint8_t> stack_map,
663                           const std::vector<uint8_t>& debug_info,
664                           bool is_full_debug_info,
665                           CompilationKind compilation_kind,
666                           bool has_should_deoptimize_flag,
667                           const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
668   DCHECK_IMPLIES(method->IsNative(), (compilation_kind != CompilationKind::kOsr));
669 
670   if (!method->IsNative()) {
671     // We need to do this before grabbing the lock_ because it needs to be able to see the string
672     // InternTable. Native methods do not have roots.
673     DCheckRootsAreValid(roots, IsSharedRegion(*region));
674   }
675 
676   const uint8_t* roots_data = reserved_data.data();
677   size_t root_table_size = ComputeRootTableSize(roots.size());
678   const uint8_t* stack_map_data = roots_data + root_table_size;
679 
680   MutexLock mu(self, *Locks::jit_lock_);
681   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
682   // finish.
683   WaitForPotentialCollectionToCompleteRunnable(self);
684   const uint8_t* code_ptr = region->CommitCode(
685       reserved_code, code, stack_map_data, has_should_deoptimize_flag);
686   if (code_ptr == nullptr) {
687     return false;
688   }
689   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
690 
691   // Commit roots and stack maps before updating the entry point.
692   if (!region->CommitData(reserved_data, roots, stack_map)) {
693     return false;
694   }
695 
696   switch (compilation_kind) {
697     case CompilationKind::kOsr:
698       number_of_osr_compilations_++;
699       break;
700     case CompilationKind::kBaseline:
701       number_of_baseline_compilations_++;
702       break;
703     case CompilationKind::kOptimized:
704       number_of_optimized_compilations_++;
705       break;
706   }
707 
708   // We need to update the debug info before the entry point gets set.
709   // At the same time we want to do under JIT lock so that debug info and JIT maps are in sync.
710   if (!debug_info.empty()) {
711     // NB: Don't allow packing of full info since it would remove non-backtrace data.
712     AddNativeDebugInfoForJit(code_ptr, debug_info, /*allow_packing=*/ !is_full_debug_info);
713   }
714 
715   // We need to update the entry point in the runnable state for the instrumentation.
716   {
717     // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
718     // compiled code is considered invalidated by some class linking, but below we still make the
719     // compiled code valid for the method. We need cha_lock_ for checking all single-implementation
720     // flags and for registering dependencies.
721     {
722       ScopedDebugDisallowReadBarriers sddrb(self);
723       MutexLock cha_mu(self, *Locks::cha_lock_);
724       bool single_impl_still_valid = true;
725       for (ArtMethod* single_impl : cha_single_implementation_list) {
726         if (!single_impl->HasSingleImplementation()) {
727           // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
728           // Hopefully the class hierarchy will be more stable when compilation is retried.
729           single_impl_still_valid = false;
730           ClearMethodCounter(method, /*was_warm=*/ false);
731           break;
732         }
733       }
734 
735       // Discard the code if any single-implementation assumptions are now invalid.
736       if (UNLIKELY(!single_impl_still_valid)) {
737         VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
738         return false;
739       }
740       DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
741           << "Should not be using cha on debuggable apps/runs!";
742 
743       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
744       for (ArtMethod* single_impl : cha_single_implementation_list) {
745         class_linker->GetClassHierarchyAnalysis()->AddDependency(
746             single_impl, method, method_header);
747       }
748     }
749 
750     if (UNLIKELY(method->IsNative())) {
751       ScopedDebugDisallowReadBarriers sddrb(self);
752       auto it = jni_stubs_map_.find(JniStubKey(method));
753       DCHECK(it != jni_stubs_map_.end())
754           << "Entry inserted in NotifyCompilationOf() should be alive.";
755       JniStubData* data = &it->second;
756       DCHECK(ContainsElement(data->GetMethods(), method))
757           << "Entry inserted in NotifyCompilationOf() should contain this method.";
758       data->SetCode(code_ptr);
759       data->UpdateEntryPoints(method_header->GetEntryPoint());
760     } else {
761       if (method->IsPreCompiled() && IsSharedRegion(*region)) {
762         ScopedDebugDisallowReadBarriers sddrb(self);
763         zygote_map_.Put(code_ptr, method);
764       } else {
765         ScopedDebugDisallowReadBarriers sddrb(self);
766         method_code_map_.Put(code_ptr, method);
767       }
768       if (compilation_kind == CompilationKind::kOsr) {
769         ScopedDebugDisallowReadBarriers sddrb(self);
770         osr_code_map_.Put(method, code_ptr);
771       } else if (method->StillNeedsClinitCheck()) {
772         ScopedDebugDisallowReadBarriers sddrb(self);
773         // This situation currently only occurs in the jit-zygote mode.
774         DCHECK(!garbage_collect_code_);
775         DCHECK(method->IsPreCompiled());
776         // The shared region can easily be queried. For the private region, we
777         // use a side map.
778         if (!IsSharedRegion(*region)) {
779           saved_compiled_methods_map_.Put(method, code_ptr);
780         }
781       } else {
782         Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
783             method, method_header->GetEntryPoint());
784       }
785     }
786     if (collection_in_progress_) {
787       // We need to update the live bitmap if there is a GC to ensure it sees this new
788       // code.
789       GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
790     }
791     VLOG(jit)
792         << "JIT added (kind=" << compilation_kind << ") "
793         << ArtMethod::PrettyMethod(method) << "@" << method
794         << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
795         << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
796         << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
797         << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
798                                          method_header->GetCodeSize());
799   }
800 
801   return true;
802 }
803 
804 size_t JitCodeCache::CodeCacheSize() {
805   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
806   return CodeCacheSizeLocked();
807 }
808 
809 bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
810   // This function is used only for testing and only with non-native methods.
811   CHECK(!method->IsNative());
812 
813   Thread* self = Thread::Current();
814   ScopedDebugDisallowReadBarriers sddrb(self);
815   MutexLock mu(self, *Locks::jit_lock_);
816 
817   bool osr = osr_code_map_.find(method) != osr_code_map_.end();
818   bool in_cache = RemoveMethodLocked(method, release_memory);
819 
820   if (!in_cache) {
821     return false;
822   }
823 
824   ClearMethodCounter(method, /* was_warm= */ false);
825   Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
826   VLOG(jit)
827       << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
828       << ArtMethod::PrettyMethod(method) << "@" << method
829       << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
830       << " dcache_size=" << PrettySize(DataCacheSizeLocked());
831   return true;
832 }
833 
834 bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
835   if (LIKELY(!method->IsNative())) {
836     auto it = profiling_infos_.find(method);
837     if (it != profiling_infos_.end()) {
838       profiling_infos_.erase(it);
839     }
840   }
841 
842   bool in_cache = false;
843   ScopedCodeCacheWrite ccw(private_region_);
844   if (UNLIKELY(method->IsNative())) {
845     auto it = jni_stubs_map_.find(JniStubKey(method));
846     if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
847       in_cache = true;
848       if (it->second.GetMethods().empty()) {
849         if (release_memory) {
850           FreeCodeAndData(it->second.GetCode());
851         }
852         jni_stubs_map_.erase(it);
853       } else {
854         it->first.UpdateShorty(it->second.GetMethods().front());
855       }
856     }
857   } else {
858     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
859       if (it->second == method) {
860         in_cache = true;
861         if (release_memory) {
862           FreeCodeAndData(it->first);
863         }
864         VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
865         it = method_code_map_.erase(it);
866       } else {
867         ++it;
868       }
869     }
870 
871     auto osr_it = osr_code_map_.find(method);
872     if (osr_it != osr_code_map_.end()) {
873       osr_code_map_.erase(osr_it);
874     }
875   }
876 
877   return in_cache;
878 }
879 
880 // This notifies the code cache that the given method has been redefined and that it should remove
881 // any cached information it has on the method. All threads must be suspended before calling this
882 // method. The compiled code for the method (if there is any) must not be in any thread's call stack.
883 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
884   Thread* self = Thread::Current();
885   ScopedDebugDisallowReadBarriers sddrb(self);
886   MutexLock mu(self, *Locks::jit_lock_);
887   RemoveMethodLocked(method, /* release_memory= */ true);
888 }
889 
890 // This invalidates old_method. Once this function returns, one can no longer use old_method to
891 // execute code unless it is fixed up. This fixup will happen later in the process of installing a
892 // class redefinition.
893 // TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
894 // shouldn't be used since it is no longer logically in the jit code cache.
895 // TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
896 void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
897   Thread* self = Thread::Current();
898   ScopedDebugDisallowReadBarriers sddrb(self);
899   MutexLock mu(self, *Locks::jit_lock_);
900   if (old_method->IsNative()) {
901     // Update methods in jni_stubs_map_.
902     for (auto& entry : jni_stubs_map_) {
903       JniStubData& data = entry.second;
904       data.MoveObsoleteMethod(old_method, new_method);
905     }
906     return;
907   }
908   // Update method_code_map_ to point to the new method.
909   for (auto& it : method_code_map_) {
910     if (it.second == old_method) {
911       it.second = new_method;
912     }
913   }
914   // Update osr_code_map_ to point to the new method.
915   auto code_map = osr_code_map_.find(old_method);
916   if (code_map != osr_code_map_.end()) {
917     osr_code_map_.Put(new_method, code_map->second);
918     osr_code_map_.erase(old_method);
919   }
920 }
921 
922 void JitCodeCache::TransitionToDebuggable() {
923   // Check that none of our methods have an entrypoint in the zygote exec
924   // space (this should be taken care of by
925   // ClassLinker::UpdateEntryPointsClassVisitor).
926   Thread* self = Thread::Current();
927   ScopedDebugDisallowReadBarriers sddrb(self);
928   {
929     MutexLock mu(self, *Locks::jit_lock_);
930     if (kIsDebugBuild) {
931       // TODO: Check `jni_stubs_map_`?
932       for (const auto& entry : method_code_map_) {
933         ArtMethod* method = entry.second;
934         DCHECK(!method->IsPreCompiled());
935         DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
936       }
937     }
938     // Not strictly necessary, but this map is useless now.
939     saved_compiled_methods_map_.clear();
940   }
941   if (kIsDebugBuild) {
942     for (const auto& entry : zygote_map_) {
943       ArtMethod* method = entry.method;
944       if (method != nullptr) {
945         DCHECK(!method->IsPreCompiled());
946         DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
947       }
948     }
949   }
950 }
951 
952 size_t JitCodeCache::CodeCacheSizeLocked() {
953   return GetCurrentRegion()->GetUsedMemoryForCode();
954 }
955 
956 size_t JitCodeCache::DataCacheSize() {
957   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
958   return DataCacheSizeLocked();
959 }
960 
961 size_t JitCodeCache::DataCacheSizeLocked() {
962   return GetCurrentRegion()->GetUsedMemoryForData();
963 }
964 
965 bool JitCodeCache::Reserve(Thread* self,
966                            JitMemoryRegion* region,
967                            size_t code_size,
968                            size_t stack_map_size,
969                            size_t number_of_roots,
970                            ArtMethod* method,
971                            /*out*/ArrayRef<const uint8_t>* reserved_code,
972                            /*out*/ArrayRef<const uint8_t>* reserved_data) {
973   code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
974   size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));
975 
976   const uint8_t* code;
977   const uint8_t* data;
978   while (true) {
979     bool at_max_capacity = false;
980     {
981       ScopedThreadSuspension sts(self, ThreadState::kSuspended);
982       MutexLock mu(self, *Locks::jit_lock_);
983       WaitForPotentialCollectionToComplete(self);
984       ScopedCodeCacheWrite ccw(*region);
985       code = region->AllocateCode(code_size);
986       data = region->AllocateData(data_size);
987       at_max_capacity = IsAtMaxCapacity();
988     }
989     if (code != nullptr && data != nullptr) {
990       break;
991     }
992     Free(self, region, code, data);
993     if (at_max_capacity) {
994       VLOG(jit) << "JIT failed to allocate code of size "
995                 << PrettySize(code_size)
996                 << ", and data of size "
997                 << PrettySize(data_size);
998       return false;
999     }
1000     // Run a code cache collection and try again.
1001     GarbageCollectCache(self);
1002   }
1003 
1004   *reserved_code = ArrayRef<const uint8_t>(code, code_size);
1005   *reserved_data = ArrayRef<const uint8_t>(data, data_size);
1006 
1007   MutexLock mu(self, *Locks::jit_lock_);
1008   histogram_code_memory_use_.AddValue(code_size);
1009   if (code_size > kCodeSizeLogThreshold) {
1010     LOG(INFO) << "JIT allocated "
1011               << PrettySize(code_size)
1012               << " for compiled code of "
1013               << ArtMethod::PrettyMethod(method);
1014   }
1015   histogram_stack_map_memory_use_.AddValue(data_size);
1016   if (data_size > kStackMapSizeLogThreshold) {
1017     LOG(INFO) << "JIT allocated "
1018               << PrettySize(data_size)
1019               << " for stack maps of "
1020               << ArtMethod::PrettyMethod(method);
1021   }
1022   return true;
1023 }
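// A sketch of how a compiler is expected to pair Reserve() with Commit()/Free() (the
// variable names below are placeholders, not the actual compiler code):
//
//   ArrayRef<const uint8_t> reserved_code;
//   ArrayRef<const uint8_t> reserved_data;
//   if (!code_cache->Reserve(self, region, code_size, stack_map_size, number_of_roots,
//                            method, &reserved_code, &reserved_data)) {
//     return false;  // No space, even after a code cache collection.
//   }
//   if (!code_cache->Commit(self, region, method, reserved_code, code, reserved_data,
//                           roots, stack_map, debug_info, is_full_debug_info,
//                           compilation_kind, has_should_deoptimize_flag, cha_list)) {
//     code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
//     return false;
//   }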
1024 
1025 void JitCodeCache::Free(Thread* self,
1026                         JitMemoryRegion* region,
1027                         const uint8_t* code,
1028                         const uint8_t* data) {
1029   MutexLock mu(self, *Locks::jit_lock_);
1030   ScopedCodeCacheWrite ccw(*region);
1031   FreeLocked(region, code, data);
1032 }
1033 
1034 void JitCodeCache::FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data) {
1035   if (code != nullptr) {
1036     RemoveNativeDebugInfoForJit(reinterpret_cast<const void*>(FromAllocationToCode(code)));
1037     region->FreeCode(code);
1038   }
1039   if (data != nullptr) {
1040     region->FreeData(data);
1041   }
1042 }
1043 
1044 class MarkCodeClosure final : public Closure {
1045  public:
1046   MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
1047       : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}
1048 
1049   void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
1050     ScopedTrace trace(__PRETTY_FUNCTION__);
1051     DCHECK(thread == Thread::Current() || thread->IsSuspended());
1052     StackVisitor::WalkStack(
1053         [&](const art::StackVisitor* stack_visitor) {
1054           const OatQuickMethodHeader* method_header =
1055               stack_visitor->GetCurrentOatQuickMethodHeader();
1056           if (method_header == nullptr) {
1057             return true;
1058           }
1059           const void* code = method_header->GetCode();
1060           if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
1061             // Use the atomic set version, as multiple threads are executing this code.
1062             bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
1063           }
1064           return true;
1065         },
1066         thread,
1067         /* context= */ nullptr,
1068         art::StackVisitor::StackWalkKind::kSkipInlinedFrames);
1069 
1070     barrier_->Pass(Thread::Current());
1071   }
1072 
1073  private:
1074   JitCodeCache* const code_cache_;
1075   CodeCacheBitmap* const bitmap_;
1076   Barrier* const barrier_;
1077 };
1078 
1079 void JitCodeCache::NotifyCollectionDone(Thread* self) {
1080   collection_in_progress_ = false;
1081   lock_cond_.Broadcast(self);
1082 }
1083 
1084 void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
1085   Barrier barrier(0);
1086   size_t threads_running_checkpoint = 0;
1087   MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
1088   threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1089   // Now that we have run our checkpoint, move to a suspended state and wait
1090   // for other threads to run the checkpoint.
1091   ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1092   if (threads_running_checkpoint != 0) {
1093     barrier.Increment(self, threads_running_checkpoint);
1094   }
1095 }
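// The checkpoint above makes each mutator thread walk its own stack (the requesting
// thread walks the stacks of suspended threads) and set, in the live bitmap, the
// allocation bit of any JIT code it is currently executing. RunCheckpoint() returns
// the number of threads that will run the closure, and the barrier blocks until all
// of them have passed it, so on return the bitmap covers all JIT code active on any
// thread's stack.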
1096 
1097 bool JitCodeCache::IsAtMaxCapacity() const {
1098   return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
1099 }
1100 
1101 bool JitCodeCache::ShouldDoFullCollection() {
1102   if (IsAtMaxCapacity()) {
1103     // Always do a full collection when the code cache is full.
1104     return true;
1105   } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) {
1106     // Always do partial collection when the code cache size is below the reserved
1107     // capacity.
1108     return false;
1109   } else if (last_collection_increased_code_cache_) {
1110     // This time do a full collection.
1111     return true;
1112   } else {
1113     // This time do a partial collection.
1114     return false;
1115   }
1116 }
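// In combination with GarbageCollectCache() below, this yields the following policy:
// while the cache is below kReservedCapacity, collections stay partial and the
// capacity is increased afterwards; once a partial collection has grown the cache,
// the next collection is a full one; and reaching the maximum capacity always forces
// a full collection.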
1117 
1118 void JitCodeCache::GarbageCollectCache(Thread* self) {
1119   ScopedTrace trace(__FUNCTION__);
1120   // Wait for an existing collection, or let everyone know we are starting one.
1121   {
1122     ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1123     MutexLock mu(self, *Locks::jit_lock_);
1124     if (!garbage_collect_code_) {
1125       private_region_.IncreaseCodeCacheCapacity();
1126       return;
1127     } else if (WaitForPotentialCollectionToComplete(self)) {
1128       return;
1129     } else {
1130       number_of_collections_++;
1131       live_bitmap_.reset(CodeCacheBitmap::Create(
1132           "code-cache-bitmap",
1133           reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
1134           reinterpret_cast<uintptr_t>(
1135               private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
1136       collection_in_progress_ = true;
1137     }
1138   }
1139 
1140   TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
1141   {
1142     TimingLogger::ScopedTiming st("Code cache collection", &logger);
1143 
1144     bool do_full_collection = false;
1145     {
1146       MutexLock mu(self, *Locks::jit_lock_);
1147       do_full_collection = ShouldDoFullCollection();
1148     }
1149 
1150     VLOG(jit) << "Do "
1151               << (do_full_collection ? "full" : "partial")
1152               << " code cache collection, code="
1153               << PrettySize(CodeCacheSize())
1154               << ", data=" << PrettySize(DataCacheSize());
1155 
1156     DoCollection(self, /* collect_profiling_info= */ do_full_collection);
1157 
1158     VLOG(jit) << "After code cache collection, code="
1159               << PrettySize(CodeCacheSize())
1160               << ", data=" << PrettySize(DataCacheSize());
1161 
1162     {
1163       MutexLock mu(self, *Locks::jit_lock_);
1164 
1165       // Increase the code cache only when we do partial collections.
1166       // TODO: base this strategy on how full the code cache is?
1167       if (do_full_collection) {
1168         last_collection_increased_code_cache_ = false;
1169       } else {
1170         last_collection_increased_code_cache_ = true;
1171         private_region_.IncreaseCodeCacheCapacity();
1172       }
1173 
1174       bool next_collection_will_be_full = ShouldDoFullCollection();
1175 
1176       // Start polling the liveness of compiled code to prepare for the next full collection.
1177       if (next_collection_will_be_full) {
1178         ScopedDebugDisallowReadBarriers sddrb(self);
1179         for (auto it : profiling_infos_) {
1180           it.second->ResetCounter();
1181         }
1182 
1183         // Change entry points of native methods back to the GenericJNI entrypoint.
1184         for (const auto& entry : jni_stubs_map_) {
1185           const JniStubData& data = entry.second;
1186           if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
1187             continue;
1188           }
1189           const OatQuickMethodHeader* method_header =
1190               OatQuickMethodHeader::FromCodePointer(data.GetCode());
1191           for (ArtMethod* method : data.GetMethods()) {
1192             if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
1193               // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
1194               // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
1195               method->SetHotCounter();
1196               method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
1197             }
1198           }
1199         }
1200       }
1201       live_bitmap_.reset(nullptr);
1202       NotifyCollectionDone(self);
1203     }
1204   }
1205   Runtime::Current()->GetJit()->AddTimingLogger(logger);
1206 }
1207 
1208 void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
1209   ScopedTrace trace(__FUNCTION__);
1210   ScopedDebugDisallowReadBarriers sddrb(self);
1211   std::unordered_set<OatQuickMethodHeader*> method_headers;
1212   {
1213     MutexLock mu(self, *Locks::jit_lock_);
1214     // Iterate over all compiled code and remove entries that are not marked.
1215     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
1216       JniStubData* data = &it->second;
1217       if (IsInZygoteExecSpace(data->GetCode()) ||
1218           !data->IsCompiled() ||
1219           GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
1220         ++it;
1221       } else {
1222         method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
1223         for (ArtMethod* method : data->GetMethods()) {
1224           VLOG(jit) << "JIT removed (JNI) " << method->PrettyMethod() << ": " << data->GetCode();
1225         }
1226         it = jni_stubs_map_.erase(it);
1227       }
1228     }
1229     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
1230       const void* code_ptr = it->first;
1231       uintptr_t allocation = FromCodeToAllocation(code_ptr);
1232       if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
1233         ++it;
1234       } else {
1235         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1236         method_headers.insert(header);
1237         VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
1238         it = method_code_map_.erase(it);
1239       }
1240     }
1241     FreeAllMethodHeaders(method_headers);
1242   }
1243 }
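// This is the reclamation step of a collection: any private-region code (or JNI stub)
// whose allocation bit was not set in the live bitmap is unlinked from the maps above
// and then released via FreeAllMethodHeaders(); code in the zygote exec space is never
// freed.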
1244 
1245 bool JitCodeCache::GetGarbageCollectCode() {
1246   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1247   return garbage_collect_code_;
1248 }
1249 
1250 void JitCodeCache::SetGarbageCollectCode(bool value) {
1251   Thread* self = Thread::Current();
1252   MutexLock mu(self, *Locks::jit_lock_);
1253   // Update the flag while holding the lock to ensure no thread will try to GC.
1254   garbage_collect_code_ = value;
1255 }
1256 
1257 void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
1258   ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
1259   DCHECK(IsMethodBeingCompiled(method, kind));
1260   switch (kind) {
1261     case CompilationKind::kOsr:
1262       current_osr_compilations_.erase(method);
1263       break;
1264     case CompilationKind::kBaseline:
1265       current_baseline_compilations_.erase(method);
1266       break;
1267     case CompilationKind::kOptimized:
1268       current_optimized_compilations_.erase(method);
1269       break;
1270   }
1271 }
1272 
1273 void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
1274   ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
1275   DCHECK(!IsMethodBeingCompiled(method, kind));
1276   switch (kind) {
1277     case CompilationKind::kOsr:
1278       current_osr_compilations_.insert(method);
1279       break;
1280     case CompilationKind::kBaseline:
1281       current_baseline_compilations_.insert(method);
1282       break;
1283     case CompilationKind::kOptimized:
1284       current_optimized_compilations_.insert(method);
1285       break;
1286   }
1287 }
1288 
1289 bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
1290   ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
1291   switch (kind) {
1292     case CompilationKind::kOsr:
1293       return ContainsElement(current_osr_compilations_, method);
1294     case CompilationKind::kBaseline:
1295       return ContainsElement(current_baseline_compilations_, method);
1296     case CompilationKind::kOptimized:
1297       return ContainsElement(current_optimized_compilations_, method);
1298   }
1299 }
1300 
1301 bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) {
1302   ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
1303   return ContainsElement(current_optimized_compilations_, method) ||
1304       ContainsElement(current_osr_compilations_, method) ||
1305       ContainsElement(current_baseline_compilations_, method);
1306 }
1307 
1308 ProfilingInfo* JitCodeCache::GetProfilingInfo(ArtMethod* method, Thread* self) {
1309   ScopedDebugDisallowReadBarriers sddrb(self);
1310   MutexLock mu(self, *Locks::jit_lock_);
1311   DCHECK(IsMethodBeingCompiled(method))
1312       << "GetProfilingInfo should only be called when the method is being compiled";
1313   auto it = profiling_infos_.find(method);
1314   if (it == profiling_infos_.end()) {
1315     return nullptr;
1316   }
1317   return it->second;
1318 }
1319 
1320 void JitCodeCache::ResetHotnessCounter(ArtMethod* method, Thread* self) {
1321   ScopedDebugDisallowReadBarriers sddrb(self);
1322   MutexLock mu(self, *Locks::jit_lock_);
1323   auto it = profiling_infos_.find(method);
1324   DCHECK(it != profiling_infos_.end());
1325   it->second->ResetCounter();
1326 }
1327 
1328 
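// One full code cache collection, in order:
//   1) Reset cold baseline-compiled methods (whose baseline hotness count hasn't changed)
//      back to their initial entrypoint so they can warm up again.
//   2) Mark compiled code that is currently the entrypoint of a method (JNI stubs and
//      regular code) and clear the OSR map.
//   3) Run a checkpoint so every thread marks the JIT code on its own stack.
//   4) Sweep whatever is left unmarked (RemoveUnmarkedCode).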
1329 void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
1330   ScopedTrace trace(__FUNCTION__);
1331   {
1332     ScopedDebugDisallowReadBarriers sddrb(self);
1333     MutexLock mu(self, *Locks::jit_lock_);
1334 
1335     // Switch back to the interpreter the methods that have a baseline entrypoint and whose
1336     // baseline hotness count hasn't changed.
1337     // Note that these methods may be on a thread's stack or get concurrently revived in the
1338     // meantime. That's OK, as the thread executing them will mark them.
1339     uint16_t warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
1340     for (auto it : profiling_infos_) {
1341       ProfilingInfo* info = it.second;
1342       if (!info->CounterHasChanged()) {
1343         const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
1344         if (ContainsPc(entry_point)) {
1345           OatQuickMethodHeader* method_header =
1346               OatQuickMethodHeader::FromEntryPoint(entry_point);
1347           if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
1348             info->GetMethod()->ResetCounter(warmup_threshold);
1349             Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(
1350                 info->GetMethod(), /*aot_code=*/ nullptr);
1351           }
1352         }
1353       }
1354     }
1355     // TODO: collect profiling info
1356     // TODO: collect optimized code
1357 
1358     // Mark compiled code that is the entry point of an ArtMethod. Compiled code that is not
1359     // an entry point is either:
1360     // - OSR compiled code, which will be removed if not in a thread's call stack.
1361     // - discarded compiled code, which will be removed if not in a thread's call stack.
1362     for (const auto& entry : jni_stubs_map_) {
1363       const JniStubData& data = entry.second;
1364       const void* code_ptr = data.GetCode();
1365       if (IsInZygoteExecSpace(code_ptr)) {
1366         continue;
1367       }
1368       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1369       for (ArtMethod* method : data.GetMethods()) {
1370         if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1371           GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
1372           break;
1373         }
1374       }
1375     }
1376     for (const auto& it : method_code_map_) {
1377       ArtMethod* method = it.second;
1378       const void* code_ptr = it.first;
1379       if (IsInZygoteExecSpace(code_ptr)) {
1380         continue;
1381       }
1382       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1383       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1384         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
1385       }
1386     }
1387 
1388     // Empty the OSR method map, as OSR compiled code will be deleted (except code that is
1389     // currently on a thread's stack).
1390     osr_code_map_.clear();
1391   }
1392 
1393   // Run a checkpoint on all threads to mark the JIT compiled code they are running.
1394   MarkCompiledCodeOnThreadStacks(self);
1395 
1396   // At this point, mutator threads are still running, and entrypoints of methods can
1397   // change. We do know they cannot change to a code cache entry that is not marked,
1398   // therefore we can safely remove those entries.
1399   RemoveUnmarkedCode(self);
1400 
1401   if (collect_profiling_info) {
1402     // TODO: Collect unused profiling infos.
1403   }
1404 }
1405 
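// Map a pc inside JIT-generated code back to the OatQuickMethodHeader containing it, e.g.
// during stack walking. The pc is first adjusted for Thumb-2 on ARM, the zygote's shared
// region is consulted through zygote_map_, and regular code is located with a lower_bound
// search on method_code_map_ (keyed by code pointer). Returns null when `pc` does not fall
// inside the code cache.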
1406 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
1407   static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
1408   if (kRuntimeISA == InstructionSet::kArm) {
1409     // On Thumb-2, the pc is offset by one.
1410     --pc;
1411   }
1412   if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
1413     return nullptr;
1414   }
1415 
1416   if (!kIsDebugBuild) {
1417     // Called with null `method` only from MarkCodeClosure::Run() in debug build.
1418     CHECK(method != nullptr);
1419   }
1420 
1421   Thread* self = Thread::Current();
1422   ScopedDebugDisallowReadBarriers sddrb(self);
1423   MutexLock mu(self, *Locks::jit_lock_);
1424   OatQuickMethodHeader* method_header = nullptr;
1425   ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
1426   if (method != nullptr && UNLIKELY(method->IsNative())) {
1427     auto it = jni_stubs_map_.find(JniStubKey(method));
1428     if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
1429       return nullptr;
1430     }
1431     const void* code_ptr = it->second.GetCode();
1432     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1433     if (!method_header->Contains(pc)) {
1434       return nullptr;
1435     }
1436   } else {
1437     if (shared_region_.IsInExecSpace(reinterpret_cast<const void*>(pc))) {
1438       const void* code_ptr = zygote_map_.GetCodeFor(method, pc);
1439       if (code_ptr != nullptr) {
1440         return OatQuickMethodHeader::FromCodePointer(code_ptr);
1441       }
1442     }
1443     auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
1444     if (it != method_code_map_.begin()) {
1445       --it;
1446       const void* code_ptr = it->first;
1447       if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
1448         method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1449         found_method = it->second;
1450       }
1451     }
1452     if (method_header == nullptr && method == nullptr) {
1453       // Scan all compiled JNI stubs as well. This slow search is used only
1454       // for checks in debug builds; in release builds `method` is not null.
1455       for (auto&& entry : jni_stubs_map_) {
1456         const JniStubData& data = entry.second;
1457         if (data.IsCompiled() &&
1458             OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
1459           method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
1460         }
1461       }
1462     }
1463     if (method_header == nullptr) {
1464       return nullptr;
1465     }
1466   }
1467 
1468   if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
1469     DCHECK_EQ(found_method, method)
1470         << ArtMethod::PrettyMethod(method) << " "
1471         << ArtMethod::PrettyMethod(found_method) << " "
1472         << std::hex << pc;
1473   }
1474   return method_header;
1475 }
1476 
1477 OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
1478   Thread* self = Thread::Current();
1479   ScopedDebugDisallowReadBarriers sddrb(self);
1480   MutexLock mu(self, *Locks::jit_lock_);
1481   auto it = osr_code_map_.find(method);
1482   if (it == osr_code_map_.end()) {
1483     return nullptr;
1484   }
1485   return OatQuickMethodHeader::FromCodePointer(it->second);
1486 }
1487 
1488 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
1489                                               ArtMethod* method,
1490                                               const std::vector<uint32_t>& entries) {
1491   DCHECK(CanAllocateProfilingInfo());
1492   ProfilingInfo* info = nullptr;
1493   {
1494     MutexLock mu(self, *Locks::jit_lock_);
1495     info = AddProfilingInfoInternal(self, method, entries);
1496   }
1497 
1498   if (info == nullptr) {
1499     GarbageCollectCache(self);
1500     MutexLock mu(self, *Locks::jit_lock_);
1501     info = AddProfilingInfoInternal(self, method, entries);
1502   }
1503   return info;
1504 }
1505 
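// Allocates the ProfilingInfo in the data portion of the private region as a single block:
// the ProfilingInfo header followed by one InlineCache per entry, rounded up to pointer
// size, e.g. for a method with 3 profiled call sites (sketch):
//   RoundUp(sizeof(ProfilingInfo) + 3 * sizeof(InlineCache), sizeof(void*))
// The placement-new goes through the writable alias returned by GetWritableDataAddress().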
1506 ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
1507                                                       ArtMethod* method,
1508                                                       const std::vector<uint32_t>& entries) {
1509   ScopedDebugDisallowReadBarriers sddrb(self);
1510   // Check whether some other thread has concurrently created it.
1511   auto it = profiling_infos_.find(method);
1512   if (it != profiling_infos_.end()) {
1513     return it->second;
1514   }
1515 
1516   size_t profile_info_size = RoundUp(
1517       sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
1518       sizeof(void*));
1519 
1520   const uint8_t* data = private_region_.AllocateData(profile_info_size);
1521   if (data == nullptr) {
1522     return nullptr;
1523   }
1524   uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
1525   ProfilingInfo* info = new (writable_data) ProfilingInfo(method, entries);
1526 
1527   profiling_infos_.Put(method, info);
1528   histogram_profiling_info_memory_use_.AddValue(profile_info_size);
1529   return info;
1530 }
1531 
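// Backs the dlmalloc mspaces of the JIT regions: a request for more memory is forwarded to
// whichever region (shared or private) owns `mspace`.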
1532 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
1533   return shared_region_.OwnsSpace(mspace)
1534       ? shared_region_.MoreCore(mspace, increment)
1535       : private_region_.MoreCore(mspace, increment);
1536 }
1537 
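// Convert the in-memory ProfilingInfo data into ProfileMethodInfo records for the dex files
// whose base location is listed in `dex_base_locations`. Methods that are still baseline
// compiled are recorded without inline caches, and receiver classes that live in a different
// class loader or a different apk are reported via `is_missing_types`.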
1538 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
1539                                       std::vector<ProfileMethodInfo>& methods) {
1540   Thread* self = Thread::Current();
1541   WaitUntilInlineCacheAccessible(self);
1542   // TODO: Avoid read barriers for potentially dead methods.
1543   // ScopedDebugDisallowReadBarriers sddrb(self);
1544   MutexLock mu(self, *Locks::jit_lock_);
1545   ScopedTrace trace(__FUNCTION__);
1546   for (const auto& entry : profiling_infos_) {
1547     ArtMethod* method = entry.first;
1548     ProfilingInfo* info = entry.second;
1549     DCHECK_EQ(method, info->GetMethod());
1550     const DexFile* dex_file = method->GetDexFile();
1551     const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
1552     if (!ContainsElement(dex_base_locations, base_location)) {
1553       // Skip dex files which are not profiled.
1554       continue;
1555     }
1556     std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
1557 
1558     // If the method is still baseline compiled, don't save the inline caches.
1559     // They might be incomplete and cause unnecessary deoptimizations.
1560     // If the inline cache is empty, the compiler will generate a regular invoke-virtual/invoke-interface.
1561     const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
1562     if (ContainsPc(entry_point) &&
1563         CodeInfo::IsBaseline(
1564             OatQuickMethodHeader::FromEntryPoint(entry_point)->GetOptimizedCodeInfoPtr())) {
1565       methods.emplace_back(/*ProfileMethodInfo*/
1566           MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1567       continue;
1568     }
1569 
1570     for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
1571       std::vector<TypeReference> profile_classes;
1572       const InlineCache& cache = info->cache_[i];
1573       ArtMethod* caller = info->GetMethod();
1574       bool is_missing_types = false;
1575       for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
1576         mirror::Class* cls = cache.classes_[k].Read();
1577         if (cls == nullptr) {
1578           break;
1579         }
1580 
1581         // Check if the receiver is in the boot class path or if it's in the
1582         // same class loader as the caller. If not, skip it, as there is not
1583         // much we can do during AOT.
1584         if (!cls->IsBootStrapClassLoaded() &&
1585             caller->GetClassLoader() != cls->GetClassLoader()) {
1586           is_missing_types = true;
1587           continue;
1588         }
1589 
1590         const DexFile* class_dex_file = nullptr;
1591         dex::TypeIndex type_index;
1592 
1593         if (cls->GetDexCache() == nullptr) {
1594           DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
1595           // Make a best effort to find the type index in the method's dex file.
1596           // We could search all open dex files, but that could be expensive and is
1597           // probably not worth it.
1598           class_dex_file = dex_file;
1599           type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
1600         } else {
1601           class_dex_file = &(cls->GetDexFile());
1602           type_index = cls->GetDexTypeIndex();
1603         }
1604         if (!type_index.IsValid()) {
1605           // Could be a proxy class or an array for which we couldn't find the type index.
1606           is_missing_types = true;
1607           continue;
1608         }
1609         if (ContainsElement(dex_base_locations,
1610                             DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
1611           // Only consider classes from the same apk (including multidex).
1612           profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
1613               class_dex_file, type_index);
1614         } else {
1615           is_missing_types = true;
1616         }
1617       }
1618       if (!profile_classes.empty()) {
1619         inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
1620             cache.dex_pc_, is_missing_types, profile_classes);
1621       }
1622     }
1623     methods.emplace_back(/*ProfileMethodInfo*/
1624         MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1625   }
1626 }
1627 
1628 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
1629   Thread* self = Thread::Current();
1630   ScopedDebugDisallowReadBarriers sddrb(self);
1631   MutexLock mu(self, *Locks::jit_lock_);
1632   return osr_code_map_.find(method) != osr_code_map_.end();
1633 }
1634 
1635 void JitCodeCache::VisitRoots(RootVisitor* visitor) {
1636   if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
1637     // In case of userfaultfd compaction, ArtMethods are updated concurrently
1638     // via linear-alloc.
1639     return;
1640   }
1641   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1642   UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
1643   for (ArtMethod* method : current_optimized_compilations_) {
1644     method->VisitRoots(root_visitor, kRuntimePointerSize);
1645   }
1646   for (ArtMethod* method : current_baseline_compilations_) {
1647     method->VisitRoots(root_visitor, kRuntimePointerSize);
1648   }
1649   for (ArtMethod* method : current_osr_compilations_) {
1650     method->VisitRoots(root_visitor, kRuntimePointerSize);
1651   }
1652 }
1653 
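// Called before compiling `method`. Returns false to skip the compilation when it would be
// redundant or premature: code of the requested kind is already installed, the declaring
// class still needs an initialization check (unless pre-jitting), or an OSR version is
// already present. For native methods this creates or reuses the JniStubData entry; for
// baseline compilations it makes sure a ProfilingInfo exists.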
1654 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
1655                                        Thread* self,
1656                                        CompilationKind compilation_kind,
1657                                        bool prejit) {
1658   if (kIsDebugBuild) {
1659     MutexLock mu(self, *Locks::jit_lock_);
1660     // Note: the compilation kind may have been adjusted since the compilation was first requested.
1661     // We really just want to check that the method is indeed being compiled.
1662     CHECK(IsMethodBeingCompiled(method));
1663   }
1664   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
1665   if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
1666     OatQuickMethodHeader* method_header =
1667         OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
1668     bool is_baseline = (compilation_kind == CompilationKind::kBaseline);
1669     if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == is_baseline) {
1670       VLOG(jit) << "Not compiling "
1671                 << method->PrettyMethod()
1672                 << " because it has already been compiled"
1673                 << " kind=" << compilation_kind;
1674       return false;
1675     }
1676   }
1677 
1678   if (method->NeedsClinitCheckBeforeCall() && !prejit) {
1679     // We do not need a synchronization barrier for checking the visibly initialized status
1680     // or checking the initialized status just for requesting visible initialization.
1681     ClassStatus status = method->GetDeclaringClass()
1682         ->GetStatus<kDefaultVerifyFlags, /*kWithSynchronizationBarrier=*/ false>();
1683     if (status != ClassStatus::kVisiblyInitialized) {
1684       // Unless we're pre-jitting, we currently don't save the JIT compiled code if we cannot
1685       // update the entrypoint due to needing an initialization check.
1686       if (status == ClassStatus::kInitialized) {
1687         // Request visible initialization but do not block to allow compiling other methods.
1688         // Hopefully, this will complete by the time the method becomes hot again.
1689         Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
1690             self, /*wait=*/ false);
1691       }
1692       VLOG(jit) << "Not compiling "
1693                 << method->PrettyMethod()
1694                 << " because it has the resolution stub";
1695       // Give it a new chance to be hot.
1696       ClearMethodCounter(method, /*was_warm=*/ false);
1697       return false;
1698     }
1699   }
1700 
1701   ScopedDebugDisallowReadBarriers sddrb(self);
1702   if (compilation_kind == CompilationKind::kOsr) {
1703     MutexLock mu(self, *Locks::jit_lock_);
1704     if (osr_code_map_.find(method) != osr_code_map_.end()) {
1705       return false;
1706     }
1707   }
1708 
1709   if (UNLIKELY(method->IsNative())) {
1710     MutexLock mu(self, *Locks::jit_lock_);
1711     JniStubKey key(method);
1712     auto it = jni_stubs_map_.find(key);
1713     bool new_compilation = false;
1714     if (it == jni_stubs_map_.end()) {
1715       // Create a new entry to mark the stub as being compiled.
1716       it = jni_stubs_map_.Put(key, JniStubData{});
1717       new_compilation = true;
1718     }
1719     JniStubData* data = &it->second;
1720     data->AddMethod(method);
1721     if (data->IsCompiled()) {
1722       OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
1723       const void* entrypoint = method_header->GetEntryPoint();
1724       // Also update the entrypoints of the other methods held by the JniStubData.
1725       // We could simply update the entrypoint of `method` but if the last JIT GC has
1726       // changed these entrypoints to GenericJNI in preparation for a full GC, we may
1727       // as well change them back as this stub shall not be collected anyway and this
1728       // can avoid a few expensive GenericJNI calls.
1729       data->UpdateEntryPoints(entrypoint);
1730       if (collection_in_progress_) {
1731         if (!IsInZygoteExecSpace(data->GetCode())) {
1732           GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
1733         }
1734       }
1735     }
1736     return new_compilation;
1737   } else {
1738     if (compilation_kind == CompilationKind::kBaseline) {
1739       DCHECK(CanAllocateProfilingInfo());
1740       bool has_profiling_info = false;
1741       {
1742         MutexLock mu(self, *Locks::jit_lock_);
1743         has_profiling_info = (profiling_infos_.find(method) != profiling_infos_.end());
1744       }
1745       if (!has_profiling_info) {
1746         if (ProfilingInfo::Create(self, method) == nullptr) {
1747           VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
1748           ClearMethodCounter(method, /*was_warm=*/ false);
1749           return false;
1750         }
1751       }
1752     }
1753   }
1754   return true;
1755 }
1756 
1757 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
1758   ScopedDebugDisallowReadBarriers sddrb(self);
1759   MutexLock mu(self, *Locks::jit_lock_);
1760   auto it = profiling_infos_.find(method);
1761   if (it == profiling_infos_.end()) {
1762     return nullptr;
1763   }
1764   if (!it->second->IncrementInlineUse()) {
1765     // Overflow of inlining uses, just bail.
1766     return nullptr;
1767   }
1768   return it->second;
1769 }
1770 
1771 void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
1772   ScopedDebugDisallowReadBarriers sddrb(self);
1773   MutexLock mu(self, *Locks::jit_lock_);
1774   auto it = profiling_infos_.find(method);
1775   DCHECK(it != profiling_infos_.end());
1776   it->second->DecrementInlineUse();
1777 }
1778 
1779 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
1780   DCHECK_EQ(Thread::Current(), self);
1781   ScopedDebugDisallowReadBarriers sddrb(self);
1782   MutexLock mu(self, *Locks::jit_lock_);
1783   if (UNLIKELY(method->IsNative())) {
1784     auto it = jni_stubs_map_.find(JniStubKey(method));
1785     DCHECK(it != jni_stubs_map_.end());
1786     JniStubData* data = &it->second;
1787     DCHECK(ContainsElement(data->GetMethods(), method));
1788     if (UNLIKELY(!data->IsCompiled())) {
1789       // Failed to compile; the JNI compiler never fails, but the cache may be full.
1790       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
1791     }  // else Commit() updated entrypoints of all methods in the JniStubData.
1792   }
1793 }
1794 
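// Point every JIT-compiled method, including pre-compiled zygote methods, back at its
// initial entrypoint (methods in method_code_map_ also get their counters cleared). The
// compiled code itself is not freed here; only saved_compiled_methods_map_ and
// osr_code_map_ are emptied.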
1795 void JitCodeCache::InvalidateAllCompiledCode() {
1796   Thread* self = Thread::Current();
1797   ScopedDebugDisallowReadBarriers sddrb(self);
1798   art::MutexLock mu(self, *Locks::jit_lock_);
1799   VLOG(jit) << "Invalidating all compiled code";
1800   Runtime* runtime = Runtime::Current();
1801   ClassLinker* linker = runtime->GetClassLinker();
1802   instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
1803   // TODO: Clear `jni_stubs_map_`?
1804   for (const auto& entry : method_code_map_) {
1805     ArtMethod* meth = entry.second;
1806     // We were compiled, so we must be warm.
1807     ClearMethodCounter(meth, /*was_warm=*/true);
1808     if (UNLIKELY(meth->IsObsolete())) {
1809       linker->SetEntryPointsForObsoleteMethod(meth);
1810     } else {
1811       instr->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
1812     }
1813   }
1814 
1815   for (const auto& entry : zygote_map_) {
1816     if (entry.method == nullptr) {
1817       continue;
1818     }
1819     if (entry.method->IsPreCompiled()) {
1820       entry.method->ClearPreCompiled();
1821     }
1822     Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(entry.method,
1823                                                                     /*aot_code=*/nullptr);
1824   }
1825 
1826   saved_compiled_methods_map_.clear();
1827   osr_code_map_.clear();
1828 }
1829 
1830 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
1831                                              const OatQuickMethodHeader* header) {
1832   DCHECK(!method->IsNative());
1833   const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
1834 
1835   // Clear the method counter if we are running jitted code, since we might want to JIT this
1836   // again in the future.
1837   if (method_entrypoint == header->GetEntryPoint()) {
1838     // The entrypoint is the one to invalidate, so we just update it to the interpreter entry point
1839     // and clear the counter to get the method Jitted again.
1840     Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
1841     ClearMethodCounter(method, /*was_warm=*/ true);
1842   } else {
1843     Thread* self = Thread::Current();
1844     ScopedDebugDisallowReadBarriers sddrb(self);
1845     MutexLock mu(self, *Locks::jit_lock_);
1846     auto it = osr_code_map_.find(method);
1847     if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
1848       // Remove the OSR method, to avoid using it again.
1849       osr_code_map_.erase(it);
1850     }
1851   }
1852 
1853   // In case the method was pre-compiled, clear that information so we
1854   // can recompile it ourselves.
1855   if (method->IsPreCompiled()) {
1856     method->ClearPreCompiled();
1857   }
1858 }
1859 
1860 void JitCodeCache::Dump(std::ostream& os) {
1861   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1862   os << "Current JIT code cache size (used / resident): "
1863      << GetCurrentRegion()->GetUsedMemoryForCode() / KB << "KB / "
1864      << GetCurrentRegion()->GetResidentMemoryForCode() / KB << "KB\n"
1865      << "Current JIT data cache size (used / resident): "
1866      << GetCurrentRegion()->GetUsedMemoryForData() / KB << "KB / "
1867      << GetCurrentRegion()->GetResidentMemoryForData() / KB << "KB\n";
1868   if (!Runtime::Current()->IsZygote()) {
1869     os << "Zygote JIT code cache size (at point of fork): "
1870        << shared_region_.GetUsedMemoryForCode() / KB << "KB / "
1871        << shared_region_.GetResidentMemoryForCode() / KB << "KB\n"
1872        << "Zygote JIT data cache size (at point of fork): "
1873        << shared_region_.GetUsedMemoryForData() / KB << "KB / "
1874        << shared_region_.GetResidentMemoryForData() / KB << "KB\n";
1875   }
1876   os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
1877      << "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
1878      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
1879      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
1880      << "Total number of JIT baseline compilations: " << number_of_baseline_compilations_ << "\n"
1881      << "Total number of JIT optimized compilations: " << number_of_optimized_compilations_ << "\n"
1882      << "Total number of JIT compilations for on stack replacement: "
1883         << number_of_osr_compilations_ << "\n"
1884      << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
1885   histogram_stack_map_memory_use_.PrintMemoryUse(os);
1886   histogram_code_memory_use_.PrintMemoryUse(os);
1887   histogram_profiling_info_memory_use_.PrintMemoryUse(os);
1888 }
1889 
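// Runs in the child right after a zygote fork: drop JIT tasks inherited from the zygote,
// reset any writable mappings inherited for the shared region, and, unless this is a child
// zygote or the runtime is in safe mode, reset the statistics and set up a fresh private
// region for this process.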
1890 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
1891   Thread* self = Thread::Current();
1892 
1893   // Remove potential tasks that have been inherited from the zygote.
1894   // We do this now and not in Jit::PostForkChildAction, as system server calls
1895   // JitCodeCache::PostForkChildAction first, and then does some code loading
1896   // that may result in new JIT tasks that we want to keep.
1897   Runtime* runtime = Runtime::Current();
1898   ThreadPool* pool = runtime->GetJit()->GetThreadPool();
1899   if (pool != nullptr) {
1900     pool->RemoveAllTasks(self);
1901   }
1902 
1903   MutexLock mu(self, *Locks::jit_lock_);
1904 
1905   // Reset potential writable MemMaps inherited from the zygote. We never want
1906   // to write to them.
1907   shared_region_.ResetWritableMappings();
1908 
1909   if (is_zygote || runtime->IsSafeMode()) {
1910     // Don't create a private region for a child zygote. Regions are usually mapped shared
1911     // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
1912     return;
1913   }
1914 
1915   // Reset all statistics to be specific to this process.
1916   number_of_baseline_compilations_ = 0;
1917   number_of_optimized_compilations_ = 0;
1918   number_of_osr_compilations_ = 0;
1919   number_of_collections_ = 0;
1920   histogram_stack_map_memory_use_.Reset();
1921   histogram_code_memory_use_.Reset();
1922   histogram_profiling_info_memory_use_.Reset();
1923 
1924   size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
1925   size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
1926   std::string error_msg;
1927   if (!private_region_.Initialize(initial_capacity,
1928                                   max_capacity,
1929                                   /* rwx_memory_allowed= */ !is_system_server,
1930                                   is_zygote,
1931                                   &error_msg)) {
1932     LOG(WARNING) << "Could not create private region after zygote fork: " << error_msg;
1933   }
1934   if (private_region_.HasCodeMapping()) {
1935     const MemMap* exec_pages = private_region_.GetExecPages();
1936     runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
1937   }
1938 }
1939 
1940 JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
1941   return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
1942 }
1943 
1944 void JitCodeCache::VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb) {
1945   for (const auto& it : jni_stubs_map_) {
1946     const JniStubData& data = it.second;
1947     if (data.IsCompiled()) {
1948       for (ArtMethod* method : data.GetMethods()) {
1949         cb(data.GetCode(), method);
1950       }
1951     }
1952   }
1953   for (auto it : method_code_map_) {  // Includes OSR methods.
1954     cb(it.first, it.second);
1955   }
1956   for (auto it : saved_compiled_methods_map_) {
1957     cb(it.second, it.first);
1958   }
1959   for (auto it : zygote_map_) {
1960     if (it.code_ptr != nullptr && it.method != nullptr) {
1961       cb(it.code_ptr, it.method);
1962     }
1963   }
1964 }
1965 
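// The ZygoteMap is a fixed-size, open-addressed hash table in the shared region: only the
// zygote writes entries, forked children read them, and the table is never resized. Both
// lookup and insertion use linear probing over a power-of-two capacity, roughly:
//   index = std::hash<ArtMethod*>()(method) & (capacity - 1);
//   while (slot is neither free nor a match) index = (index + 1) & (capacity - 1);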
1966 void ZygoteMap::Initialize(uint32_t number_of_methods) {
1967   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1968   // Allocate so that the map ends up 40-80% full. This offers OK lookup times and guarantees
1969   // that the probing loops terminate, as a null entry is always reachable.
1970   size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
1971   const uint8_t* memory = region_->AllocateData(
1972       capacity * sizeof(Entry) + sizeof(ZygoteCompilationState));
1973   if (memory == nullptr) {
1974     LOG(WARNING) << "Could not allocate data for the zygote map";
1975     return;
1976   }
1977   const Entry* data = reinterpret_cast<const Entry*>(memory);
1978   region_->FillData(data, capacity, Entry { nullptr, nullptr });
1979   map_ = ArrayRef(data, capacity);
1980   compilation_state_ = reinterpret_cast<const ZygoteCompilationState*>(
1981       memory + capacity * sizeof(Entry));
1982   region_->WriteData(compilation_state_, ZygoteCompilationState::kInProgress);
1983 }
1984 
1985 const void* ZygoteMap::GetCodeFor(ArtMethod* method, uintptr_t pc) const {
1986   if (map_.empty()) {
1987     return nullptr;
1988   }
1989 
1990   if (method == nullptr) {
1991     // Do a linear search. This should only be used in debug builds.
1992     CHECK(kIsDebugBuild);
1993     for (const Entry& entry : map_) {
1994       const void* code_ptr = entry.code_ptr;
1995       if (code_ptr != nullptr) {
1996         OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1997         if (method_header->Contains(pc)) {
1998           return code_ptr;
1999         }
2000       }
2001     }
2002     return nullptr;
2003   }
2004 
2005   std::hash<ArtMethod*> hf;
2006   size_t index = hf(method) & (map_.size() - 1u);
2007   size_t original_index = index;
2008   // Loop over the array: we know this loop terminates as we will either
2009   // encounter the given method, or a null entry. Both terminate the loop.
2010   // Note that the zygote may concurrently write new entries to the map. That's OK as the
2011   // map is never resized.
2012   while (true) {
2013     const Entry& entry = map_[index];
2014     if (entry.method == nullptr) {
2015       // Not compiled yet.
2016       return nullptr;
2017     }
2018     if (entry.method == method) {
2019       if (entry.code_ptr == nullptr) {
2020         // This is a race with the zygote which wrote the method, but hasn't written the
2021         // code. Just bail and wait for the next time we need the method.
2022         return nullptr;
2023       }
2024       if (pc != 0 && !OatQuickMethodHeader::FromCodePointer(entry.code_ptr)->Contains(pc)) {
2025         return nullptr;
2026       }
2027       return entry.code_ptr;
2028     }
2029     index = (index + 1) & (map_.size() - 1);
2030     DCHECK_NE(original_index, index);
2031   }
2032 }
2033 
2034 void ZygoteMap::Put(const void* code, ArtMethod* method) {
2035   if (map_.empty()) {
2036     return;
2037   }
2038   CHECK(Runtime::Current()->IsZygote());
2039   std::hash<ArtMethod*> hf;
2040   size_t index = hf(method) & (map_.size() - 1);
2041   size_t original_index = index;
2042   // Because the size of the map is bigger than the number of methods that will
2043   // be added, we are guaranteed to find a free slot in the array, and therefore
2044   // this loop is guaranteed to terminate.
2045   while (true) {
2046     const Entry* entry = &map_[index];
2047     if (entry->method == nullptr) {
2048       // Note that readers can read this memory concurrently, but that's OK: we write
2049       // pointer-sized values, and GetCodeFor() tolerates a partially written entry.
2050       region_->WriteData(entry, Entry { method, code });
2051       break;
2052     }
2053     index = (index + 1) & (map_.size() - 1);
2054     DCHECK_NE(original_index, index);
2055   }
2056   DCHECK_EQ(GetCodeFor(method), code);
2057 }
2058 
2059 }  // namespace jit
2060 }  // namespace art
2061