/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>
#include <sys/resource.h>

#include "art_method-inl.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/memfd.h"
#include "base/memory_tool.h"
#include "base/pointer_size.h"
#include "base/runtime_debug.h"
#include "base/scoped_flock.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "compilation_kind.h"
#include "debugger.h"
#include "dex/type_lookup_table.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "interpreter/interpreter.h"
#include "jit-inl.h"
#include "jit_code_cache.h"
#include "jit_create.h"
#include "jni/java_vm_ext.h"
#include "mirror/method_handle_impl.h"
#include "mirror/var_handle.h"
#include "oat/image-inl.h"
#include "oat/oat_file.h"
#include "oat/oat_file_manager.h"
#include "oat/oat_quick_method_header.h"
#include "oat/stack_map.h"
#include "profile/profile_boot_info.h"
#include "profile/profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "small_pattern_matcher.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art HIDDEN {
namespace jit {

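// Global switch for on-stack replacement (OSR): when false, PrepareForOsr
// bails out early and the interpreter never jumps into JITed code mid-method.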
static constexpr bool kEnableOnStackReplacement = true;

// JIT compiler
JitCompilerInterface* Jit::jit_compiler_ = nullptr;

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
    : code_cache_(code_cache),
      options_(options),
      boot_completed_lock_("Jit::boot_completed_lock_"),
      cumulative_timings_("JIT timings"),
      memory_use_("Memory used for compilation", 16),
      lock_("JIT memory use lock"),
      zygote_mapping_methods_(),
      fd_methods_(-1),
      fd_methods_size_(0) {}

std::unique_ptr<Jit> Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
  jit_compiler_ = jit_create();
  std::unique_ptr<Jit> jit(new Jit(code_cache, options));

  // If the code collector is enabled, check whether that should still hold:
  // With 'perf', we want a 1-1 mapping between an address and a method.
  // We cannot keep method pointers live during the instrumentation method entry
  // trampoline, so we disable jit-gc when generating debug info.
  // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should work
  // in theory, it is causing deadlocks in some jvmti tests related to Jit GC. Hence, disabling
  // Jit GC for now (b/147208992).
  if (code_cache->GetGarbageCollectCode()) {
    code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
        !jit->JitAtFirstUse());
  }

  VLOG(jit) << "JIT created with initial_capacity="
      << PrettySize(options->GetCodeCacheInitialCapacity())
      << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
      << ", warmup_threshold=" << options->GetWarmupThreshold()
      << ", optimize_threshold=" << options->GetOptimizeThreshold()
      << ", profile_saver_options=" << options->GetProfileSaverOptions();

  // We want to know whether the compiler is compiling baseline, as this
  // affects how we GC ProfilingInfos.
  for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
    if (option == "--baseline") {
      options->SetUseBaselineCompiler();
      break;
    }
  }

  // Notify the native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());

  return jit;
}


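// Fast path for trivially-shaped methods: if the method body matches one of a
// few known small code patterns, install the matched code directly as the
// method's entrypoint instead of going through a full JIT compilation.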
bool Jit::TryPatternMatch(ArtMethod* method_to_compile, CompilationKind compilation_kind) {
  // Try to pattern match the method. Only on arm and arm64 for now, as those have
  // sufficiently similar calling conventions between C++ and managed code.
  if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) {
    if (!Runtime::Current()->IsJavaDebuggable() &&
        compilation_kind == CompilationKind::kBaseline &&
        !method_to_compile->StillNeedsClinitCheck()) {
      const void* pattern = SmallPatternMatcher::TryMatch(method_to_compile);
      if (pattern != nullptr) {
        VLOG(jit) << "Successfully pattern matched " << method_to_compile->PrettyMethod();
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method_to_compile, pattern);
        return true;
      }
    }
  }
  return false;
}

bool Jit::CompileMethodInternal(ArtMethod* method,
                                Thread* self,
                                CompilationKind compilation_kind,
                                bool prejit) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  // If the baseline flag was explicitly passed in the compiler options, change the compilation kind
  // from optimized to baseline.
  if (jit_compiler_->IsBaselineCompiler() && compilation_kind == CompilationKind::kOptimized) {
    compilation_kind = CompilationKind::kBaseline;
  }

  if (method->IsPreCompiled() && !prejit) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " because the method is marked precompiled"
              << " and the compilation request isn't for pre-compilation.";
    return false;
  }

  // If we're asked to compile baseline, but we cannot allocate profiling infos,
  // change the compilation kind to optimized.
  if ((compilation_kind == CompilationKind::kBaseline) &&
      !GetCodeCache()->CanAllocateProfilingInfo()) {
    compilation_kind = CompilationKind::kOptimized;
  }

  // Don't compile the method if it has breakpoints.
  if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " because it is not safe to jit according to runtime callbacks. For example, there"
              << " could be breakpoints in this method.";
    return false;
  }

  if (!method->IsCompilable()) {
    DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
           method->IsProxyMethod()) << method->PrettyMethod();
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " because the method was made "
              << "obsolete while waiting for the JIT task to run. This probably happened due to "
              << "concurrent structural class redefinition.";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
  if ((compilation_kind == CompilationKind::kOsr) && GetCodeCache()->IsSharedRegion(*region)) {
    VLOG(jit) << "JIT not osr compiling "
              << method->PrettyMethod()
              << " due to using shared region";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  if (TryPatternMatch(method_to_compile, compilation_kind)) {
    return true;
  }

  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, compilation_kind, prejit)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " kind=" << compilation_kind;
  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
  code_cache_->DoneCompiling(method_to_compile, self);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " kind=" << compilation_kind;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::WaitForWorkersToBeCreated() {
  if (thread_pool_ != nullptr) {
    thread_pool_->WaitForWorkersToBeCreated();
  }
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  if (thread_pool_ != nullptr) {
    std::unique_ptr<JitThreadPool> pool;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear the thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      pool = std::move(thread_pool_);
    }

    // When running sanitized, let all tasks finish to avoid leaks. Otherwise just clear the queue.
    if (!kRunningOnMemoryTool) {
      pool->StopWorkers(self);
      pool->RemoveAllTasks(self);
    }
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding suspend logic
    // here. Besides, this is only done for shutdown.
    pool->Wait(self, false, false);
  }
}

void Jit::StartProfileSaver(const std::string& profile_filename,
                            const std::vector<std::string>& code_paths,
                            const std::string& ref_profile_filename) {
  if (options_->GetSaveProfilingInfo()) {
    ProfileSaver::Start(options_->GetProfileSaverOptions(),
                        profile_filename,
                        code_cache_,
                        code_paths,
                        ref_profile_filename);
  }
}

void Jit::StopProfileSaver() {
  if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
  }
}

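// A hotness threshold of zero means there is no warmup phase: methods are
// compiled synchronously when they are first invoked.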
bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK_IMPLIES(options_->GetSaveProfilingInfo(), !ProfileSaver::IsStarted());
  if (options_->DumpJitInfoOnShutdown()) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_ != nullptr) {
    delete jit_compiler_;
    jit_compiler_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->jit_compiler_->GenerateDebugInfo()) {
    jit_compiler_->TypesLoaded(&type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (jit_compiler_->GenerateDebugInfo()) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_compiler_->TypesLoaded(visitor.classes_.data(), visitor.classes_.size());
  }
}

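// Architecture-specific assembly stub: copies the frame prepared in `stack`
// onto the native stack and jumps to `native_pc` in the OSR-compiled code.
// See PrepareForOsr for how that frame is filled in.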
extern "C" void art_quick_osr_stub(void** stack,
                                   size_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);

OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
  if (!kEnableOnStackReplacement) {
    return nullptr;
  }

  // Cheap check whether the method has been compiled already. That's an indicator that we
  // should OSR into it.
  if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return nullptr;
  }

  // Fetch some data before looking up the OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  CodeItemDataAccessor accessor(method->DexInstructionData());
  const size_t number_of_vregs = accessor.RegistersSize();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  OsrData* osr_data = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No OSR method yet, just return to the interpreter.
      return nullptr;
    }

    CodeInfo code_info(osr_method);

    // Find the stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return nullptr;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
    DCHECK_EQ(vreg_map.size(), number_of_vregs);

    size_t frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to hold the shadow frame values. The OSR stub will copy that memory to
    // the stack.
    // Note that we could pass the shadow frame to the stub and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
    if (osr_data == nullptr) {
      return nullptr;
    }
    memset(osr_data, 0, sizeof(OsrData) + frame_size);
    osr_data->frame_size = frame_size;

    // ART ABI: the ArtMethod is at the bottom of the stack.
    osr_data->memory[0] = method;

    if (vreg_map.empty()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = vregs[vreg];
        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
  }
  return osr_data;
}

bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Before allowing the jump, make sure no code is actively inspecting the method to avoid
  // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
  // disable OSR when single stepping, but that's currently hard to know at this point.
  // Currently, HaveLocalsChanged is not frame specific. It is possible to make it frame specific
  // to allow OSR of frames that don't have any locals changed, but it isn't worth the additional
  // complexity.
  if (Runtime::Current()->GetInstrumentation()->NeedsSlowInterpreterForMethod(thread, method) ||
      Runtime::Current()->GetRuntimeCallbacks()->HaveLocalsChanged()) {
    return false;
  }

  ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
  OsrData* osr_data = jit->PrepareForOsr(method,
                                         dex_pc + dex_pc_offset,
                                         shadow_frame->GetVRegArgs(0));

  if (osr_data == nullptr) {
    return false;
  }

  {
    thread->PopShadowFrame();
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(osr_data->memory,
                          osr_data->frame_size,
                          osr_data->native_pc,
                          result,
                          method->GetShorty(),
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(osr_data);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

void Jit::NotifyZygoteCompilationDone() {
  if (fd_methods_ == -1) {
    return;
  }

  size_t offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
      offset += capacity;
    }
  }

  // Do an msync to ensure we are not affected by writes still sitting in caches.
  if (msync(zygote_mapping_methods_.Begin(), fd_methods_size_, MS_SYNC) != 0) {
    PLOG(WARNING) << "Failed to sync boot image methods memory";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // We don't need the shared mapping anymore, and we need to drop it in case
  // the file hasn't been sealed writable.
  zygote_mapping_methods_ = MemMap::Invalid();

  // Seal writes now. Zygote and children will map the memory private in order
  // to write to it.
  if (fcntl(fd_methods_, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_WRITE) == -1) {
    PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // Ensure the contents are the same as before: there was a window between
  // the memcpy and the sealing where other processes could have changed the
  // contents.
  // Note this would not be needed if we could have used F_SEAL_FUTURE_WRITE,
  // see b/143833776.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
        LOG(WARNING) << "Contents differ in boot image methods data";
        code_cache_->GetZygoteMap()->SetCompilationState(
            ZygoteCompilationState::kNotifiedFailure);
        return;
      }
      offset += capacity;
    }
  }

  // Future spawned processes don't need the fd anymore.
  fd_methods_.reset();

  // In order to have the zygote and children share the memory, we also remap
  // the memory into the zygote process.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (mremap(child_mapping_methods.Begin() + offset,
                 capacity,
                 capacity,
                 MREMAP_FIXED | MREMAP_MAYMOVE,
                 page_start) == MAP_FAILED) {
        // Failing to remap is safe as the process will just use the old
        // contents.
        PLOG(WARNING) << "Failed mremap of boot image methods of " << space->GetImageFilename();
      }
      offset += capacity;
    }
  }

  LOG(INFO) << "Successfully notified child processes on sharing boot image methods";

  // Mark that compilation of the boot classpath is done, and memory can now be
  // shared. Other processes will pick up this information.
  code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedOk);

  // The private mapping created for this process has been mremapped. We can
  // reset it.
  child_mapping_methods.Reset();
}

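// A thread-pool task running one JIT compilation, either a regular request or
// a pre-compilation (e.g. from a profile at boot).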
class JitCompileTask final : public Task {
 public:
  enum class TaskKind {
    kCompile,
    kPreCompile,
  };

  JitCompileTask(ArtMethod* method,
                 TaskKind task_kind,
                 CompilationKind compilation_kind)
      : method_(method),
        kind_(task_kind),
        compilation_kind_(compilation_kind) {
  }

  void Run(Thread* self) override {
    {
      ScopedObjectAccess soa(self);
      switch (kind_) {
        case TaskKind::kCompile:
        case TaskKind::kPreCompile: {
          Runtime::Current()->GetJit()->CompileMethodInternal(
              method_,
              self,
              compilation_kind_,
              /* prejit= */ (kind_ == TaskKind::kPreCompile));
          break;
        }
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() override {
    JitThreadPool* thread_pool = Runtime::Current()->GetJit()->GetThreadPool();
    if (thread_pool != nullptr) {
      thread_pool->Remove(this);
    }
    delete this;
  }

  ArtMethod* GetArtMethod() const {
    return method_;
  }

  CompilationKind GetCompilationKind() const {
    return compilation_kind_;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  const CompilationKind compilation_kind_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

static std::string GetProfileFile(const std::string& dex_location) {
  // Hardcoded assumption of where the profile file is.
  // TODO(ngeoffray): this is brittle and we would need to change it if we
  // wanted to do more eager JITting of methods in a profile. This is
  // currently only for system server.
  return dex_location + ".prof";
}

static std::string GetBootProfileFile(const std::string& profile) {
  // The boot profile can be found next to the compilation profile, with a
  // different extension.
  return ReplaceFileExtension(profile, "bprof");
}
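
// For example (hypothetical paths, illustrating the naming convention):
//   GetProfileFile("/system/framework/services.jar")
//       -> "/system/framework/services.jar.prof"
//   GetBootProfileFile("/system/framework/services.jar.prof")
//       -> "/system/framework/services.jar.bprof"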

// Return whether the address is guaranteed to be backed by a file or is shared.
// This information can be used to know whether MADV_DONTNEED will make
// subsequent accesses repopulate the memory or return zero.
static bool IsAddressKnownBackedByFileOrShared(const void* addr) {
  // We use the Linux pagemap interface to learn whether an address is backed
  // by a file or is shared. See:
  // https://www.kernel.org/doc/Documentation/vm/pagemap.txt
  const size_t page_size = MemMap::GetPageSize();
  uintptr_t vmstart = reinterpret_cast<uintptr_t>(AlignDown(addr, page_size));
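  // Each page has one 64-bit entry in the pagemap file, so the entry for the
  // page containing `addr` is at byte offset (page number) * sizeof(uint64_t).
  // For example, with 4 KiB pages the entry for address 0x7f00001234 would be
  // at offset (0x7f00001000 / 0x1000) * 8.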
  off_t index = (vmstart / page_size) * sizeof(uint64_t);
  android::base::unique_fd pagemap(open("/proc/self/pagemap", O_RDONLY | O_CLOEXEC));
  if (pagemap == -1) {
    return false;
  }
  if (lseek(pagemap, index, SEEK_SET) != index) {
    return false;
  }
  uint64_t flags;
  if (read(pagemap, &flags, sizeof(uint64_t)) != sizeof(uint64_t)) {
    return false;
  }
  // From https://www.kernel.org/doc/Documentation/vm/pagemap.txt:
  //  * Bit  61    page is file-page or shared-anon (since 3.5)
  return (flags & (1LL << 61)) != 0;
}

/**
 * A JIT task to run after all profile compilation is done.
 */
class JitDoneCompilingProfileTask final : public SelfDeletingTask {
 public:
  explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
      : dex_files_(dex_files) {}

  void Run([[maybe_unused]] Thread* self) override {
    // Madvise DONTNEED dex files now that we're done compiling methods.
    for (const DexFile* dex_file : dex_files_) {
      if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
        int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), gPageSize)),
                             RoundUp(dex_file->Size(), gPageSize),
                             MADV_DONTNEED);
        if (result == -1) {
          PLOG(WARNING) << "Madvise failed";
        }
      }
    }
  }

 private:
  std::vector<const DexFile*> dex_files_;

  DISALLOW_COPY_AND_ASSIGN(JitDoneCompilingProfileTask);
};

class JitZygoteDoneCompilingTask final : public SelfDeletingTask {
 public:
  JitZygoteDoneCompilingTask() {}

  void Run([[maybe_unused]] Thread* self) override {
    DCHECK(Runtime::Current()->IsZygote());
    Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
        ZygoteCompilationState::kDone);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(JitZygoteDoneCompilingTask);
};

/**
 * A JIT task to run Java verification of boot classpath classes that were not
 * verified at compile-time.
 */
class ZygoteVerificationTask final : public Task {
 public:
  ZygoteVerificationTask() {}

  void Run(Thread* self) override {
    // We are going to load classes and run verification, which may also need to load
    // classes. If the thread cannot load classes (typically when the runtime is
    // debuggable), then just return.
    if (!self->CanLoadClasses()) {
      return;
    }
    Runtime* runtime = Runtime::Current();
    ClassLinker* linker = runtime->GetClassLinker();
    const std::vector<const DexFile*>& boot_class_path =
        runtime->GetClassLinker()->GetBootClassPath();
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
    uint64_t start_ns = ThreadCpuNanoTime();
    uint64_t number_of_classes = 0;
    for (const DexFile* dex_file : boot_class_path) {
      for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
        const dex::ClassDef& class_def = dex_file->GetClassDef(i);
        const char* descriptor = dex_file->GetClassDescriptor(class_def);
        klass.Assign(linker->LookupResolvedType(descriptor, /* class_loader= */ nullptr));
        if (klass == nullptr) {
          // Class not loaded yet.
          DCHECK(!self->IsExceptionPending());
          continue;
        }
        if (klass->IsVerified()) {
          continue;
        }
        if (linker->VerifyClass(self, /* verifier_deps= */ nullptr, klass) ==
                verifier::FailureKind::kHardFailure) {
          CHECK(self->IsExceptionPending());
          LOG(WARNING) << "Methods in the boot classpath failed to verify: "
                       << self->GetException()->Dump();
          self->ClearException();
        } else {
          ++number_of_classes;
        }
        CHECK(!self->IsExceptionPending());
      }
    }
    LOG(INFO) << "Background verification of "
              << number_of_classes
              << " classes from boot classpath took "
              << PrettyDuration(ThreadCpuNanoTime() - start_ns);
  }
};

class ZygoteTask final : public Task {
 public:
  ZygoteTask() {}

  void Run(Thread* self) override {
    Runtime* runtime = Runtime::Current();
    uint32_t added_to_queue = 0;
    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
      const std::vector<const DexFile*>& boot_class_path =
          runtime->GetClassLinker()->GetBootClassPath();
      ScopedNullHandle<mirror::ClassLoader> null_handle;
      // We avoid doing compilation at boot for the secondary zygote, as apps forked from it are
      // not critical for boot.
      if (Runtime::Current()->IsPrimaryZygote()) {
        for (const std::string& profile_file : space->GetProfileFiles()) {
          std::string boot_profile = GetBootProfileFile(profile_file);
          LOG(INFO) << "JIT Zygote looking at boot profile " << boot_profile;

          // We add to the queue for zygote so that we can fork processes in-between compilations.
          added_to_queue += runtime->GetJit()->CompileMethodsFromBootProfile(
              self, boot_class_path, boot_profile, null_handle, /* add_to_queue= */ true);
        }
      }
      for (const std::string& profile_file : space->GetProfileFiles()) {
        LOG(INFO) << "JIT Zygote looking at profile " << profile_file;

        added_to_queue += runtime->GetJit()->CompileMethodsFromProfile(
            self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
      }
    }
    DCHECK(runtime->GetJit()->InZygoteUsingJit());
    runtime->GetJit()->AddPostBootTask(self, new JitZygoteDoneCompilingTask());

    JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
    code_cache->GetZygoteMap()->Initialize(added_to_queue);
  }

  void Finalize() override {
    delete this;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
};

class JitProfileTask final : public Task {
 public:
  JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                 jobject class_loader) {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader>(class_loader)));
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (const auto& dex_file : dex_files) {
      dex_files_.push_back(dex_file.get());
      // Register the dex file so that we can guarantee it doesn't get deleted
      // while reading it during the task.
      class_linker->RegisterDexFile(*dex_file.get(), h_loader.Get());
    }
    // We also create our own global ref to use this class loader later.
    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), h_loader.Get());
  }

  void Run(Thread* self) override {
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
        soa.Decode<mirror::ClassLoader>(class_loader_));

    std::string profile = GetProfileFile(dex_files_[0]->GetLocation());
    std::string boot_profile = GetBootProfileFile(profile);

    Jit* jit = Runtime::Current()->GetJit();

    jit->CompileMethodsFromBootProfile(
        self,
        dex_files_,
        boot_profile,
        loader,
        /* add_to_queue= */ false);

    jit->CompileMethodsFromProfile(
        self,
        dex_files_,
        profile,
        loader,
        /* add_to_queue= */ true);
  }

  void Finalize() override {
    delete this;
  }

  ~JitProfileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), class_loader_);
  }

 private:
  std::vector<const DexFile*> dex_files_;
  jobject class_loader_;

  DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
};

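// Only write when the contents differ: a write into a private copy-on-write
// mapping dirties the page even when the value written is identical, which
// would defeat the sharing this memory is set up to provide.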
static void CopyIfDifferent(void* s1, const void* s2, size_t n) {
  if (memcmp(s1, s2, n) != 0) {
    memcpy(s1, s2, n);
  }
}

void Jit::MapBootImageMethods() {
  if (Runtime::Current()->IsJavaDebuggable()) {
    LOG(INFO) << "Not mapping boot image methods due to process being debuggable";
    return;
  }
  CHECK_NE(fd_methods_.get(), -1);
  if (!code_cache_->GetZygoteMap()->CanMapBootImageMethods()) {
    LOG(WARNING) << "Not mapping boot image methods due to error from zygote";
    // We don't need the fd anymore.
    fd_methods_.reset();
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  // We don't need the fd anymore.
  fd_methods_.reset();

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    return;
  }
  //  We are going to mremap the child mapping into the image:
  //
  //                            ImageSection       ChildMappingMethods
  //
  //         section start -->  -----------
  //                            |         |
  //                            |         |
  //            page_start -->  |         |   <-----   -----------
  //                            |         |            |         |
  //                            |         |            |         |
  //                            |         |            |         |
  //                            |         |            |         |
  //                            |         |            |         |
  //                            |         |            |         |
  //                            |         |            |         |
  //             page_end  -->  |         |   <-----   -----------
  //                            |         |
  //         section end   -->  -----------
  //
  size_t offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
    if (page_end <= page_start) {
      // The section doesn't span an entire aligned page.
      continue;
    }
    uint64_t capacity = page_end - page_start;
    // Walk over methods in the boot image, and check for:
    // 1) methods whose class is not initialized in this process, but is in the
    // zygote process. For such methods, we need their entrypoints to be stubs
    // that do the initialization check.
    // 2) native methods whose data pointer is different than the one in the
    // zygote. Such methods may have had a custom native implementation provided
    // by JNI RegisterNatives.
    header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
      // Methods in the boot image should never have their single
      // implementation flag set (and therefore never have a `data_` pointing
      // to an ArtMethod for single implementation).
      CHECK(method.IsIntrinsic() || !method.HasSingleImplementationFlag());
      if (method.IsRuntimeMethod()) {
        return;
      }

      // Pointer to the method we're currently using.
      uint8_t* pointer = reinterpret_cast<uint8_t*>(&method);
      // The data pointer of that method that we want to keep.
      uint8_t* data_pointer = pointer + ArtMethod::DataOffset(kRuntimePointerSize).Int32Value();
      if (method.IsNative() && data_pointer >= page_start && data_pointer < page_end) {
        // The data pointer of the ArtMethod in the shared memory we are going to remap into our
        // own mapping. This is the data that we will see after the remap.
        uint8_t* new_data_pointer =
            child_mapping_methods.Begin() + offset + (data_pointer - page_start);
        CopyIfDifferent(new_data_pointer, data_pointer, sizeof(void*));
      }

      // The entrypoint of the method we're currently using and that we want to
      // keep.
      uint8_t* entry_point_pointer = pointer +
          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kRuntimePointerSize).Int32Value();
      if (!method.GetDeclaringClassUnchecked()->IsVisiblyInitialized() &&
          method.IsStatic() &&
          !method.IsConstructor() &&
          entry_point_pointer >= page_start &&
          entry_point_pointer < page_end) {
        // The entry point of the ArtMethod in the shared memory we are going to remap into our
        // own mapping. This is the entrypoint that we will see after the remap.
        uint8_t* new_entry_point_pointer =
            child_mapping_methods.Begin() + offset + (entry_point_pointer - page_start);
        CopyIfDifferent(new_entry_point_pointer, entry_point_pointer, sizeof(void*));
      }
    }, space->Begin(), kRuntimePointerSize);

    // Map the memory in the boot image range.
    if (mremap(child_mapping_methods.Begin() + offset,
               capacity,
               capacity,
               MREMAP_FIXED | MREMAP_MAYMOVE,
               page_start) == MAP_FAILED) {
      PLOG(WARNING) << "Failed to mremap boot image methods for " << space->GetImageFilename();
    }
    offset += capacity;
  }

  // The private mapping created for this process has been mremapped. We can
  // reset it.
  child_mapping_methods.Reset();
  LOG(INFO) << "Successfully mapped boot image methods";
}

bool Jit::InZygoteUsingJit() {
  Runtime* runtime = Runtime::Current();
  return runtime->IsZygote() && runtime->HasImageWithProfile() && runtime->UseJitCompilation();
}

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.

  thread_pool_.reset(JitThreadPool::Create("Jit thread pool", 1));

  Runtime* runtime = Runtime::Current();
  thread_pool_->SetPthreadPriority(
      runtime->IsZygote()
          ? options_->GetZygoteThreadPoolPthreadPriority()
          : options_->GetThreadPoolPthreadPriority());
  Start();

  if (runtime->IsZygote()) {
    // To speed up class lookups, generate a type lookup table for
    // dex files not backed by an oat file.
    for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
      if (dex_file->GetOatDexFile() == nullptr) {
        TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
        type_lookup_tables_.push_back(
            std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
        dex_file->SetOatDexFile(type_lookup_tables_.back().get());
      }
    }

    // Add a task that will verify boot classpath jars that were not
    // pre-compiled.
    thread_pool_->AddTask(Thread::Current(), new ZygoteVerificationTask());
  }

  if (InZygoteUsingJit()) {
    // If we have an image with a profile, request a JIT task to
    // compile all methods in that profile.
    thread_pool_->AddTask(Thread::Current(), new ZygoteTask());

    // And create mappings to share boot image methods memory from the zygote to
    // child processes.

    // Compute the total capacity required for the boot image methods.
    uint64_t total_capacity = 0;
    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
      const ImageHeader& header = space->GetImageHeader();
      const ImageSection& section = header.GetMethodsSection();
      // Mappings need to be at the page level.
      uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
      uint8_t* page_end =
          AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
      if (page_end > page_start) {
        total_capacity += (page_end - page_start);
      }
    }

    // Create the child and zygote mappings to the boot image methods.
    if (total_capacity > 0) {
      // Start with '/boot' and end with '.art' to match the pattern recognized
      // by android_os_Debug.cpp for boot images.
      const char* name = "/boot-image-methods.art";
      unique_fd mem_fd =
          unique_fd(art::memfd_create(name, /* flags= */ MFD_ALLOW_SEALING | MFD_CLOEXEC));
      if (mem_fd.get() == -1) {
        PLOG(WARNING) << "Could not create boot image methods file descriptor";
        return;
      }
      if (ftruncate(mem_fd.get(), total_capacity) != 0) {
        PLOG(WARNING) << "Failed to truncate boot image methods file to " << total_capacity;
        return;
      }
      std::string error_str;

      // Create the shared mapping eagerly, as this prevents other processes
      // from adding the writable seal.
      zygote_mapping_methods_ = MemMap::MapFile(
        total_capacity,
        PROT_READ | PROT_WRITE,
        MAP_SHARED,
        mem_fd,
        /* start= */ 0,
        /* low_4gb= */ false,
        "boot-image-methods",
        &error_str);

      if (!zygote_mapping_methods_.IsValid()) {
        LOG(WARNING) << "Failed to create zygote mapping of boot image methods: " << error_str;
        return;
      }
      if (zygote_mapping_methods_.MadviseDontFork() != 0) {
        LOG(WARNING) << "Failed to madvise dont fork boot image methods";
        zygote_mapping_methods_ = MemMap();
        return;
      }

      // We should use the F_SEAL_FUTURE_WRITE flag, but this has unexpected
      // behavior on private mappings after fork (the mapping becomes shared between
      // parent and children), see b/143833776.
      // We will seal the writes once we are done writing to the shared mapping.
      if (fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) == -1) {
        PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
        zygote_mapping_methods_ = MemMap();
        return;
      }
      fd_methods_ = unique_fd(mem_fd.release());
      fd_methods_size_ = total_capacity;
    }
  }
}

void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                           jobject class_loader) {
  if (dex_files.empty()) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  // If the runtime is debuggable, don't bother precompiling methods.
  // If system server is being profiled, don't precompile as we are going to use
  // the JIT to count hotness. Note that --count-hotness-in-compiled-code is
  // only forced when we also profile the boot classpath, see
  // AndroidRuntime.cpp.
  if (runtime->IsSystemServer() &&
      UseJitCompilation() &&
      options_->UseProfiledJitCompilation() &&
      runtime->HasImageWithProfile() &&
      !runtime->IsSystemServerProfiled() &&
      !runtime->IsJavaDebuggable()) {
    // Note: this precompilation is currently not running in production because:
    // - UseProfiledJitCompilation() is not set by default.
    // - System server dex files are registered *before* we set the runtime as
    //   system server (though we are in the system server process).
    thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
  }
}

void Jit::AddCompileTask(Thread* self,
                         ArtMethod* method,
                         CompilationKind compilation_kind) {
  thread_pool_->AddTask(self, method, compilation_kind);
}

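// Resolve the method referenced by a profile entry and, if it still runs on an
// interpreter or generic entrypoint, mark it precompiled and either compile it
// right away or enqueue a pre-compilation task.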
bool Jit::CompileMethodFromProfile(Thread* self,
                                   ClassLinker* class_linker,
                                   uint32_t method_idx,
                                   Handle<mirror::DexCache> dex_cache,
                                   Handle<mirror::ClassLoader> class_loader,
                                   bool add_to_queue,
                                   bool compile_after_boot) {
  ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
      method_idx, dex_cache, class_loader);
  if (method == nullptr) {
    self->ClearException();
    return false;
  }
  if (!method->IsCompilable() || !method->IsInvokable()) {
    return false;
  }
  if (method->IsPreCompiled()) {
    // Already seen by another profile.
    return false;
  }
  CompilationKind compilation_kind = CompilationKind::kOptimized;
  const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
  if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
      class_linker->IsQuickGenericJniStub(entry_point) ||
      class_linker->IsNterpEntryPoint(entry_point) ||
      // We explicitly check for the resolution stub, and not the resolution trampoline.
      // The trampoline is for methods backed by a .oat file that has a compiled version of
      // the method.
      (entry_point == GetQuickResolutionStub())) {
    VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
              << " from profile";
    method->SetPreCompiled();
    if (!add_to_queue) {
      CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
    } else {
      Task* task = new JitCompileTask(
          method, JitCompileTask::TaskKind::kPreCompile, compilation_kind);
      if (compile_after_boot) {
        AddPostBootTask(self, task);
      } else {
        thread_pool_->AddTask(self, task);
      }
      return true;
    }
  }
  return false;
}

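// Walk a boot profile and compile (or enqueue) every method it lists.
// Returns the number of methods added to the compilation queue.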
uint32_t Jit::CompileMethodsFromBootProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {
  unix_file::FdFile profile(profile_file, O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No boot profile: " << profile_file;
    return 0u;
  }

  ProfileBootInfo profile_info;
  if (!profile_info.Load(profile.Fd(), dex_files)) {
    LOG(ERROR) << "Could not load profile file: " << profile_file;
    return 0u;
  }

  ScopedObjectAccess soa(self);
  VariableSizedHandleScope handles(self);
  std::vector<Handle<mirror::DexCache>> dex_caches;
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  for (const DexFile* dex_file : profile_info.GetDexFiles()) {
    dex_caches.push_back(handles.NewHandle(class_linker->FindDexCache(self, *dex_file)));
  }

  uint32_t added_to_queue = 0;
  for (const std::pair<uint32_t, uint32_t>& pair : profile_info.GetMethods()) {
    if (CompileMethodFromProfile(self,
                                 class_linker,
                                 pair.second,
                                 dex_caches[pair.first],
                                 class_loader,
                                 add_to_queue,
                                 /*compile_after_boot=*/false)) {
      ++added_to_queue;
    }
  }
  return added_to_queue;
}

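// Same as above, but driven by a ProfileCompilationInfo: walks every dex file
// the profile references, queues each profiled method for pre-compilation
// after boot has completed, and finally queues a JitDoneCompilingProfileTask
// to run once all of these compilations are done.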
uint32_t Jit::CompileMethodsFromProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {

  if (profile_file.empty()) {
    LOG(WARNING) << "Expected a profile file in JIT zygote mode";
    return 0u;
  }

  // We don't generate boot profiles on device, therefore we don't
  // need to lock the file.
  unix_file::FdFile profile(profile_file, O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No profile: " << profile_file;
    return 0u;
  }

  ProfileCompilationInfo profile_info(/* for_boot_image= */ class_loader.IsNull());
  if (!profile_info.Load(profile.Fd())) {
    LOG(ERROR) << "Could not load profile file";
    return 0u;
  }
  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  uint32_t added_to_queue = 0u;
  for (const DexFile* dex_file : dex_files) {
    std::set<dex::TypeIndex> class_types;
    std::set<uint16_t> all_methods;
    if (!profile_info.GetClassesAndMethods(*dex_file,
                                           &class_types,
                                           &all_methods,
                                           &all_methods,
                                           &all_methods)) {
      // This means the profile file did not reference the dex file, which is the case
      // when there are no classes or methods of that dex file in the profile.
      continue;
    }
    dex_cache.Assign(class_linker->FindDexCache(self, *dex_file));
    CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();

    for (uint16_t method_idx : all_methods) {
      if (CompileMethodFromProfile(self,
                                   class_linker,
                                   method_idx,
                                   dex_cache,
                                   class_loader,
                                   add_to_queue,
                                   /*compile_after_boot=*/true)) {
        ++added_to_queue;
      }
    }
  }

  // Add a task to run when all compilation is done.
  AddPostBootTask(self, new JitDoneCompilingProfileTask(dex_files));
  return added_to_queue;
}

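// Returns true for methods whose hotness samples should never trigger JIT
// compilation: class initializers, methods marked uncompilable, and the
// native MethodHandle/VarHandle invokers (rationale in the body).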
bool Jit::IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsClassInitializer() || !method->IsCompilable()) {
    // We do not want to compile such methods.
    return true;
  }
  if (method->IsNative()) {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
    if (klass == GetClassRoot<mirror::MethodHandle>() ||
        klass == GetClassRoot<mirror::VarHandle>()) {
      // MethodHandle and VarHandle invocation methods are required to throw an
      // UnsupportedOperationException if invoked reflectively. We achieve this by having native
      // implementations that raise the exception. We need to disable JIT compilation of these JNI
      // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
      // stubs. Since these stubs have different stack representations we can then crash in stack
      // walking (b/78151261).
      return true;
    }
  }
  return false;
}

void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
  // Note the hotness counter will be reset by the compiled code.

  if (thread_pool_ == nullptr) {
    return;
  }

  // We arrive here after baseline-compiled code has reached its baseline
  // hotness threshold. If we're not only using the baseline compiler, enqueue
  // a compilation task that will optimize the method.
  if (!options_->UseBaselineCompiler()) {
    AddCompileTask(self, method, CompilationKind::kOptimized);
  }
}

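// RAII helper that marks the current thread as a runtime thread for its
// lifetime and restores the previous state on destruction. Used below so that
// synchronous JIT compilation on a mutator thread observes the same
// class-loading behavior as compilation on a JIT worker thread.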
class ScopedSetRuntimeThread {
 public:
  explicit ScopedSetRuntimeThread(Thread* self)
      : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
    self_->SetIsRuntimeThread(true);
  }

  ~ScopedSetRuntimeThread() {
    self_->SetIsRuntimeThread(was_runtime_thread_);
  }

 private:
  Thread* self_;
  bool was_runtime_thread_;
};

void Jit::MethodEntered(Thread* self, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
    ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    if (np_method->IsCompilable()) {
      CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
    }
    return;
  }

  AddSamples(self, method);
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}

ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}
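
// Usage sketch (hypothetical caller, not from this file): pause the JIT
// worker threads around an operation that must not race with compilation:
//
//   {
//     ScopedJitSuspend suspend;    // stops workers if the JIT is running
//     RedefineClassesSomehow();    // hypothetical exclusive operation
//   }                              // workers restart here if they were on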
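
// Polling thread started in processes forked from the zygote: waits until the
// zygote signals that its boot-image compilation is done, then attaches to
// the runtime, suspends all threads, and maps the updated boot image methods
// into this process via Jit::MapBootImageMethods.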
static void* RunPollingThread(void* arg) {
  Jit* jit = reinterpret_cast<Jit*>(arg);
  do {
    sleep(10);
  } while (!jit->GetCodeCache()->GetZygoteMap()->IsCompilationNotified());

  // We will suspend other threads: we can only do that if we're attached to the
  // runtime.
  Runtime* runtime = Runtime::Current();
  bool thread_attached = runtime->AttachCurrentThread(
      "BootImagePollingThread",
      /* as_daemon= */ true,
      /* thread_group= */ nullptr,
      /* create_peer= */ false);
  CHECK(thread_attached);

  if (getpriority(PRIO_PROCESS, 0 /* this thread */) == 0) {
    // Slightly reduce thread priority, mostly so the suspend logic notices that we're
    // not a high priority thread, and can time out more slowly. May fail on host.
    (void)setpriority(PRIO_PROCESS, 0 /* this thread */, 1);
  } else {
    PLOG(ERROR) << "Unexpected BootImagePollingThread priority: " << getpriority(PRIO_PROCESS, 0);
  }
  {
    // Prevent other threads from running while we are remapping the boot image
    // ArtMethods. Native threads might still be running, but they cannot
    // change the contents of ArtMethods.
    ScopedSuspendAll ssa(__FUNCTION__);
    runtime->GetJit()->MapBootImageMethods();
  }

  Runtime::Current()->DetachCurrentThread();
  return nullptr;
}

void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
  // Clear the potential boot tasks inherited from the zygote.
  {
    MutexLock mu(Thread::Current(), boot_completed_lock_);
    tasks_after_boot_.clear();
  }

  Runtime* const runtime = Runtime::Current();
  // Check if we'll need to remap the boot image methods.
  if (!is_zygote && fd_methods_ != -1) {
    // Create a thread that will poll the status of zygote compilation, and map
    // the private mapping of boot image methods.
    // For a child zygote, we instead query IsCompilationNotified() after the zygote fork.
    zygote_mapping_methods_.ResetInForkedProcess();
    pthread_t polling_thread;
    pthread_attr_t attr;
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(
        pthread_create,
        (&polling_thread, &attr, RunPollingThread, reinterpret_cast<void*>(this)),
        "Methods maps thread");
  }

  if (is_zygote || runtime->IsSafeMode()) {
    // Delete the thread pool, we are not going to JIT.
    thread_pool_.reset(nullptr);
    return;
  }
  // At this point, the compiler options have been adjusted to the particular configuration
  // of the forked child. Parse them again.
  jit_compiler_->ParseCompilerOptions();

  // Adjust the status of code cache collection: the status from the zygote was to not collect.
  // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should work
  // in theory, it is causing deadlocks in some jvmti tests related to JIT GC. Hence, we disable
  // JIT GC for now (b/147208992).
  code_cache_->SetGarbageCollectCode(
      !jit_compiler_->GenerateDebugInfo() &&
      !JitAtFirstUse());

  if (is_system_server && runtime->HasImageWithProfile()) {
    // Disable garbage collection: we don't want it to delete methods we're compiling
    // through boot and system server profiles.
    // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
    code_cache_->SetGarbageCollectCode(false);
  }

  // We do this here instead of PostZygoteFork, as NativeDebugInfoPostFork only
  // applies to a child.
  NativeDebugInfoPostFork();
}

void Jit::PreZygoteFork() {
  if (thread_pool_ == nullptr) {
    return;
  }
  thread_pool_->DeleteThreads();

  NativeDebugInfoPreFork();
}

// Returns the number of threads in the current process, by counting the
// entries in /proc/self/task, or -1 on failure.
static int GetTaskCount() {
  DIR* directory = opendir("/proc/self/task");
  if (directory == nullptr) {
    return -1;
  }

  uint32_t count = 0;
  struct dirent* entry = nullptr;
  while ((entry = readdir(directory)) != nullptr) {
    if ((strcmp(entry->d_name, ".") == 0) || (strcmp(entry->d_name, "..") == 0)) {
      continue;
    }
    ++count;
  }
  closedir(directory);
  return count;
}

void Jit::PostZygoteFork() {
  Runtime* runtime = Runtime::Current();
  if (thread_pool_ == nullptr) {
    // If this is a child zygote, check if we need to remap the boot image
    // methods.
    if (runtime->IsZygote() &&
        fd_methods_ != -1 &&
        code_cache_->GetZygoteMap()->IsCompilationNotified()) {
      ScopedSuspendAll ssa(__FUNCTION__);
      MapBootImageMethods();
    }
    return;
  }
  if (runtime->IsZygote() && code_cache_->GetZygoteMap()->IsCompilationDoneButNotNotified()) {
    // Copy the boot image methods data to the mappings we created to share
    // with the children. We do this here as we are the only thread running and
    // we don't risk other threads concurrently updating the ArtMethods.
    CHECK_EQ(GetTaskCount(), 1);
    NotifyZygoteCompilationDone();
    CHECK(code_cache_->GetZygoteMap()->IsCompilationNotified());
  }
  thread_pool_->CreateThreads();
  thread_pool_->SetPthreadPriority(
      runtime->IsZygote()
          ? options_->GetZygoteThreadPoolPthreadPriority()
          : options_->GetThreadPoolPthreadPriority());
}

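// Queues `task` to run once the device has booted. Before BootCompleted() is
// called, tasks accumulate in `tasks_after_boot_`; afterwards they go straight
// to the thread pool.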
void Jit::AddPostBootTask(Thread* self, Task* task) {
  MutexLock mu(self, boot_completed_lock_);
  if (boot_completed_) {
    thread_pool_->AddTask(self, task);
  } else {
    tasks_after_boot_.push_back(task);
  }
}

void Jit::BootCompleted() {
  Thread* self = Thread::Current();
  std::deque<Task*> tasks;
  {
    MutexLock mu(self, boot_completed_lock_);
    tasks = std::move(tasks_after_boot_);
    boot_completed_ = true;
  }
  for (Task* task : tasks) {
    thread_pool_->AddTask(self, task);
  }
}

bool Jit::CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const {
  return !is_for_shared_region ||
      Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(method->GetDeclaringClass());
}

bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls);
}

bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string);
}

bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  if (!is_for_shared_region) {
    return cls->IsInitialized();
  } else {
    // Look up the class status in the oat file.
    const DexFile& dex_file = *cls->GetDexCache()->GetDexFile();
    const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
    // In case we run without an image there won't be a backing oat file.
    if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
      return false;
    }
    uint16_t class_def_index = cls->GetDexClassDefIndex();
    return oat_dex_file->GetOatClass(class_def_index).GetStatus() >= ClassStatus::kInitialized;
  }
}

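// Called when a method has become hot enough to consider compiling it.
// Decides between an OSR, baseline, or optimized compilation task, or none at
// all (e.g. when the method is pre-compiled, ignored for sampling, or a
// memory-shared method that has not yet exhausted its local hotness budget).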
void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
  if (thread_pool_ == nullptr) {
    return;
  }

  if (JitAtFirstUse()) {
    // Tests might request JIT on first use (compiled synchronously in the interpreter).
    return;
  }

  if (!UseJitCompilation()) {
    return;
  }

  if (IgnoreSamplesForMethod(method)) {
    return;
  }

  if (GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
      // If we already have compiled code for it, nterp may be stuck in a loop.
      // Compile OSR.
      AddCompileTask(self, method, CompilationKind::kOsr);
    }
    return;
  }

  // Check if we have precompiled this method.
  if (UNLIKELY(method->IsPreCompiled())) {
    if (!method->StillNeedsClinitCheck()) {
      const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
      if (entry_point != nullptr) {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
      }
    }
    return;
  }

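  // For memory-shared methods, rate-limit compilation with a small per-method
  // countdown kept in `shared_method_counters_`: the method must become hot
  // kIndividualSharedMethodHotnessThreshold more times in this process before
  // a task is enqueued.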
  static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0x3f;
  if (method->IsMemorySharedMethod()) {
    MutexLock mu(self, lock_);
    auto it = shared_method_counters_.find(method);
    if (it == shared_method_counters_.end()) {
      shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
      return;
    } else if (it->second != 0) {
      DCHECK_LE(it->second, kIndividualSharedMethodHotnessThreshold);
      shared_method_counters_[method] = it->second - 1;
      return;
    } else {
      shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
    }
  }

  if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
    AddCompileTask(self, method, CompilationKind::kBaseline);
  } else {
    AddCompileTask(self, method, CompilationKind::kOptimized);
  }
}

bool Jit::CompileMethod(ArtMethod* method,
                        Thread* self,
                        CompilationKind compilation_kind,
                        bool prejit) {
  // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
  ScopedSetRuntimeThread ssrt(self);
  // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
  // conflicts with jitzygote optimizations.
  return CompileMethodInternal(method, self, compilation_kind, prejit);
}

size_t JitThreadPool::GetTaskCount(Thread* self) {
  MutexLock mu(self, task_queue_lock_);
  return generic_queue_.size() +
      baseline_queue_.size() +
      optimized_queue_.size() +
      osr_queue_.size();
}

void JitThreadPool::RemoveAllTasks(Thread* self) {
  // The ThreadPool is responsible for calling Finalize (which usually deletes
  // the task memory) on all the tasks.
  Task* task = nullptr;
  do {
    {
      MutexLock mu(self, task_queue_lock_);
      if (generic_queue_.empty()) {
        break;
      }
      task = generic_queue_.front();
      generic_queue_.pop_front();
    }
    task->Finalize();
  } while (true);

  MutexLock mu(self, task_queue_lock_);
  baseline_queue_.clear();
  optimized_queue_.clear();
  osr_queue_.clear();
}

JitThreadPool::~JitThreadPool() {
  DeleteThreads();
  RemoveAllTasks(Thread::Current());
}

void JitThreadPool::AddTask(Thread* self, Task* task) {
  MutexLock mu(self, task_queue_lock_);
  // We don't want to enqueue any new tasks when the thread pool has stopped. This simplifies
  // the implementation of the redefinition feature in jvmti.
  if (!started_) {
    task->Finalize();
    return;
  }
  generic_queue_.push_back(task);
  // If we have any waiters, signal one.
  if (waiting_count_ != 0) {
    task_queue_condition_.Signal(self);
  }
}

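// Enqueues a compilation request for `method` on the queue matching `kind`.
// The *_enqueued_methods_ sets deduplicate requests: a method can appear at
// most once per compilation kind until its task is removed again.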
void JitThreadPool::AddTask(Thread* self, ArtMethod* method, CompilationKind kind) {
  MutexLock mu(self, task_queue_lock_);
  // We don't want to enqueue any new tasks when the thread pool has stopped. This simplifies
  // the implementation of the redefinition feature in jvmti.
  if (!started_) {
    return;
  }
  switch (kind) {
    case CompilationKind::kOsr:
      if (ContainsElement(osr_enqueued_methods_, method)) {
        return;
      }
      osr_enqueued_methods_.insert(method);
      osr_queue_.push_back(method);
      break;
    case CompilationKind::kBaseline:
      if (ContainsElement(baseline_enqueued_methods_, method)) {
        return;
      }
      baseline_enqueued_methods_.insert(method);
      baseline_queue_.push_back(method);
      break;
    case CompilationKind::kOptimized:
      if (ContainsElement(optimized_enqueued_methods_, method)) {
        return;
      }
      optimized_enqueued_methods_.insert(method);
      optimized_queue_.push_back(method);
      break;
  }
  // If we have any waiters, signal one.
  if (waiting_count_ != 0) {
    task_queue_condition_.Signal(self);
  }
}

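// Picks the next task to run; per the `Locked` suffix convention, the caller
// is expected to hold `task_queue_lock_`. Generic tasks take precedence, then
// OSR requests (a thread is actively stuck in a loop), then baseline, then
// optimized compilations.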
Task* JitThreadPool::TryGetTaskLocked() {
  if (!started_) {
    return nullptr;
  }

  // Fetch generic tasks first.
  if (!generic_queue_.empty()) {
    Task* task = generic_queue_.front();
    generic_queue_.pop_front();
    return task;
  }

  // OSR requests second, then baseline and finally optimized.
  Task* task = FetchFrom(osr_queue_, CompilationKind::kOsr);
  if (task == nullptr) {
    task = FetchFrom(baseline_queue_, CompilationKind::kBaseline);
    if (task == nullptr) {
      task = FetchFrom(optimized_queue_, CompilationKind::kOptimized);
    }
  }
  return task;
}

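// Pops the next method from `methods` (if any) and wraps it in a
// JitCompileTask of the given kind. The task is also recorded in
// `current_compilations_` so that VisitRoots() keeps seeing the method while
// it is being compiled.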
Task* JitThreadPool::FetchFrom(std::deque<ArtMethod*>& methods, CompilationKind kind) {
  if (!methods.empty()) {
    ArtMethod* method = methods.front();
    methods.pop_front();
    JitCompileTask* task = new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, kind);
    current_compilations_.insert(task);
    return task;
  }
  return nullptr;
}

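// Removes `task` from the bookkeeping: forgets the in-flight compilation and
// clears the corresponding deduplication entry so the method can be enqueued
// again for that compilation kind.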
void JitThreadPool::Remove(JitCompileTask* task) {
  MutexLock mu(Thread::Current(), task_queue_lock_);
  current_compilations_.erase(task);
  switch (task->GetCompilationKind()) {
    case CompilationKind::kOsr: {
      osr_enqueued_methods_.erase(task->GetArtMethod());
      break;
    }
    case CompilationKind::kBaseline: {
      baseline_enqueued_methods_.erase(task->GetArtMethod());
      break;
    }
    case CompilationKind::kOptimized: {
      optimized_enqueued_methods_.erase(task->GetArtMethod());
      break;
    }
  }
}

void Jit::VisitRoots(RootVisitor* visitor) {
  if (thread_pool_ != nullptr) {
    thread_pool_->VisitRoots(visitor);
  }
}

void JitThreadPool::VisitRoots(RootVisitor* visitor) {
  if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
    // In case of userfaultfd compaction, ArtMethods are updated concurrently
    // via linear-alloc.
    return;
  }
  // Fetch all ArtMethods first, to avoid holding `task_queue_lock_` for too
  // long.
  std::vector<ArtMethod*> methods;
  {
    MutexLock mu(Thread::Current(), task_queue_lock_);
    // We don't look at `generic_queue_` because it contains:
    // - Generic tasks like `ZygoteVerificationTask` which don't hold any root.
    // - `JitCompileTask` for precompiled methods, which we know are live, being
    //   part of the boot classpath or system server classpath.
    methods.insert(methods.end(), osr_queue_.begin(), osr_queue_.end());
    methods.insert(methods.end(), baseline_queue_.begin(), baseline_queue_.end());
    methods.insert(methods.end(), optimized_queue_.begin(), optimized_queue_.end());
    for (JitCompileTask* task : current_compilations_) {
      methods.push_back(task->GetArtMethod());
    }
  }
  UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
  for (ArtMethod* method : methods) {
    method->VisitRoots(root_visitor, kRuntimePointerSize);
  }
}

}  // namespace jit
}  // namespace art