/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/memfd.h"
#include "base/memory_tool.h"
#include "base/runtime_debug.h"
#include "base/scoped_flock.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "compilation_kind.h"
#include "debugger.h"
#include "dex/type_lookup_table.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "image-inl.h"
#include "interpreter/interpreter.h"
#include "jit-inl.h"
#include "jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/method_handle_impl.h"
#include "mirror/var_handle.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "profile/profile_boot_info.h"
#include "profile/profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art {
namespace jit {

static constexpr bool kEnableOnStackReplacement = true;

// Maximum permitted threshold value.
static constexpr uint32_t kJitMaxThreshold = std::numeric_limits<uint16_t>::max();

static constexpr uint32_t kJitDefaultOptimizeThreshold = 0xffff;
// Different optimization threshold constants. These default to the equivalent optimization
// threshold divided by 2, but can be overridden on the command line.
static constexpr uint32_t kJitStressDefaultOptimizeThreshold = kJitDefaultOptimizeThreshold / 2;
static constexpr uint32_t kJitSlowStressDefaultOptimizeThreshold =
    kJitStressDefaultOptimizeThreshold / 2;

static constexpr uint32_t kJitDefaultWarmupThreshold = 0xffff;
// Different warm-up threshold constants. These default to the equivalent warm-up threshold
// divided by 2, but can be overridden on the command line.
static constexpr uint32_t kJitStressDefaultWarmupThreshold = kJitDefaultWarmupThreshold / 2;
static constexpr uint32_t kJitSlowStressDefaultWarmupThreshold =
    kJitStressDefaultWarmupThreshold / 2;
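// For example: with the 0xffff defaults above, the stress thresholds come out to 0x7fff and
// the slow-stress thresholds to 0x3fff.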

DEFINE_RUNTIME_DEBUG_FLAG(Jit, kSlowMode);

// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
JitCompilerInterface* Jit::jit_compiler_ = nullptr;
JitCompilerInterface* (*Jit::jit_load_)(void) = nullptr;

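// These options typically arrive via runtime flags (defined in runtime_options.def and parsed
// in parsed_options.cc); assuming the current flag spellings, for example:
//   -Xjitthreshold:<n>         --> JITOptimizeThreshold
//   -Xjitwarmupthreshold:<n>   --> JITWarmupThreshold
//   -Xjitprithreadweight:<n>   --> JITPriorityThreadWeight
//   -Xjittransitionweight:<n>  --> JITInvokeTransitionWeight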
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
  jit_options->use_profiled_jit_compilation_ =
      options.GetOrDefault(RuntimeArgumentMap::UseProfiledJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->profile_saver_options_ =
      options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
  jit_options->thread_pool_pthread_priority_ =
      options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
  jit_options->zygote_thread_pool_pthread_priority_ =
      options.GetOrDefault(RuntimeArgumentMap::JITZygotePoolThreadPthreadPriority);

  // Set default optimize threshold to aid with checking defaults.
  jit_options->optimize_threshold_ =
      kIsDebugBuild
          ? (Jit::kSlowMode
                 ? kJitSlowStressDefaultOptimizeThreshold
                 : kJitStressDefaultOptimizeThreshold)
          : kJitDefaultOptimizeThreshold;

  // Set default warm-up threshold to aid with checking defaults.
  jit_options->warmup_threshold_ =
      kIsDebugBuild ? (Jit::kSlowMode
                           ? kJitSlowStressDefaultWarmupThreshold
                           : kJitStressDefaultWarmupThreshold)
                    : kJitDefaultWarmupThreshold;

  if (options.Exists(RuntimeArgumentMap::JITOptimizeThreshold)) {
    jit_options->optimize_threshold_ = *options.Get(RuntimeArgumentMap::JITOptimizeThreshold);
  }
  DCHECK_LE(jit_options->optimize_threshold_, kJitMaxThreshold);

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
  }
  DCHECK_LE(jit_options->warmup_threshold_, kJitMaxThreshold);

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
    : code_cache_(code_cache),
      options_(options),
      boot_completed_lock_("Jit::boot_completed_lock_"),
      cumulative_timings_("JIT timings"),
      memory_use_("Memory used for compilation", 16),
      lock_("JIT memory use lock"),
      zygote_mapping_methods_(),
      fd_methods_(-1),
      fd_methods_size_(0) {}

Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
  if (jit_load_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: library not loaded";
    return nullptr;
  }
  jit_compiler_ = (jit_load_)();
  if (jit_compiler_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
    return nullptr;
  }
  std::unique_ptr<Jit> jit(new Jit(code_cache, options));
  // If the code collector is enabled, check if that still holds:
  // With 'perf', we want a 1:1 mapping between an address and a method.
  // We aren't able to keep method pointers live during the instrumentation method entry
  // trampoline, so we just disable JIT GC if we are doing that.
  // JitAtFirstUse compiles methods synchronously on mutator threads. While this should work
  // in theory, it is causing deadlocks in some jvmti tests related to JIT GC. Hence, JIT GC
  // is disabled for now (b/147208992).
  if (code_cache->GetGarbageCollectCode()) {
    code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
        !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
        !jit->JitAtFirstUse());
  }

  VLOG(jit) << "JIT created with initial_capacity="
            << PrettySize(options->GetCodeCacheInitialCapacity())
            << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
            << ", warmup_threshold=" << options->GetWarmupThreshold()
            << ", optimize_threshold=" << options->GetOptimizeThreshold()
            << ", profile_saver_options=" << options->GetProfileSaverOptions();

  // We want to know whether the compiler is compiling baseline, as this
  // affects how we GC ProfilingInfos.
  for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
    if (option == "--baseline") {
      options->SetUseBaselineCompiler();
      break;
    }
  }

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

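// Resolves `name` in the already-dlopen'ed compiler library and stores the symbol in
// `*address`; LoadCompilerLibrary() below uses this to resolve the "jit_load" entry point.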
template <typename T>
bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
  *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
  if (*address == nullptr) {
    *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
    return false;
  }
  return true;
}

bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load libart-compiler.so: " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  if (!LoadSymbol(&jit_load_, "jit_load", error_msg)) {
    dlclose(jit_library_handle_);
    return false;
  }
  return true;
}

bool Jit::CompileMethod(ArtMethod* method,
                        Thread* self,
                        CompilationKind compilation_kind,
                        bool prejit) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  // If the baseline flag was explicitly passed in the compiler options, change the compilation
  // kind from optimized to baseline.
  if (jit_compiler_->IsBaselineCompiler() && compilation_kind == CompilationKind::kOptimized) {
    compilation_kind = CompilationKind::kBaseline;
  }

  // If we're asked to compile baseline, but we cannot allocate profiling infos,
  // change the compilation kind to optimized.
  if ((compilation_kind == CompilationKind::kBaseline) &&
      !GetCodeCache()->CanAllocateProfilingInfo()) {
    compilation_kind = CompilationKind::kOptimized;
  }

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Don't compile the method if it has breakpoints.
  if (cb->IsMethodBeingInspected(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " due to not being safe to jit according to runtime-callbacks. For example, there"
              << " could be breakpoints in this method.";
    return false;
  }

  if (!method->IsCompilable()) {
    DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
           method->IsProxyMethod()) << method->PrettyMethod();
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to method being made "
              << "obsolete while waiting for JIT task to run. This probably happened due to "
              << "concurrent structural class redefinition.";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
  if ((compilation_kind == CompilationKind::kOsr) && GetCodeCache()->IsSharedRegion(*region)) {
    VLOG(jit) << "JIT not osr compiling "
              << method->PrettyMethod()
              << " due to using shared region";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, compilation_kind, prejit)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " kind=" << compilation_kind;
  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
  code_cache_->DoneCompiling(method_to_compile, self, compilation_kind);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " kind=" << compilation_kind;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::WaitForWorkersToBeCreated() {
  if (thread_pool_ != nullptr) {
    thread_pool_->WaitForWorkersToBeCreated();
  }
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  if (thread_pool_ != nullptr) {
    std::unique_ptr<ThreadPool> pool;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      pool = std::move(thread_pool_);
    }

    // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
    if (!kRunningOnMemoryTool) {
      pool->StopWorkers(self);
      pool->RemoveAllTasks(self);
    }
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding suspend logic
    // here. Besides, this is only done at shutdown.
    pool->Wait(self, false, false);
  }
}

void Jit::StartProfileSaver(const std::string& profile_filename,
                            const std::vector<std::string>& code_paths,
                            const std::string& ref_profile_filename) {
  if (options_->GetSaveProfilingInfo()) {
    ProfileSaver::Start(options_->GetProfileSaverOptions(),
                        profile_filename,
                        code_cache_,
                        code_paths,
                        ref_profile_filename);
  }
}

void Jit::StopProfileSaver() {
  if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
  }
}

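// A hotness threshold of zero means there is no warm-up period: MethodEntered() then compiles
// a method synchronously on its first invocation.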
bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK_IMPLIES(options_->GetSaveProfilingInfo(), !ProfileSaver::IsStarted());
  if (options_->DumpJitInfoOnShutdown()) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_ != nullptr) {
    delete jit_compiler_;
    jit_compiler_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->jit_compiler_->GenerateDebugInfo()) {
    jit_compiler_->TypesLoaded(&type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (jit_compiler_->GenerateDebugInfo()) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_compiler_->TypesLoaded(visitor.classes_.data(), visitor.classes_.size());
  }
}

extern "C" void art_quick_osr_stub(void** stack,
                                   size_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);

OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
  if (!kEnableOnStackReplacement) {
    return nullptr;
  }

  // Cheap check to see if the method has already been compiled. That's an indicator that we
  // should OSR into it.
  if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return nullptr;
  }

  // Fetch some data before looking up an OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  CodeItemDataAccessor accessor(method->DexInstructionData());
  const size_t number_of_vregs = accessor.RegistersSize();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  OsrData* osr_data = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No OSR method yet, just return to the interpreter.
      return nullptr;
    }

    CodeInfo code_info(osr_method);

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return nullptr;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
    DCHECK_EQ(vreg_map.size(), number_of_vregs);

    size_t frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to hold the shadow frame values. The OSR stub will copy that memory to
    // the stack.
    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
    if (osr_data == nullptr) {
      return nullptr;
    }
    memset(osr_data, 0, sizeof(OsrData) + frame_size);
    osr_data->frame_size = frame_size;

    // Art ABI: ArtMethod is at the bottom of the stack.
    osr_data->memory[0] = method;

    if (vreg_map.empty()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = vregs[vreg];
        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
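        // For example, a vreg at stack byte offset 16 lands in int32 slot index 4 of the
        // new frame, i.e. bytes [16, 20) of osr_data->memory.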
        (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
  }
  return osr_data;
}

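// Interpreter entry point for OSR: called on branches in hot methods. The branch target is
// dex_pc + dex_pc_offset; if the code cache holds an OSR-compiled version of `method` with a
// stack map at that target, we jump into the compiled code through art_quick_osr_stub instead
// of continuing in the interpreter.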
bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Before allowing the jump, make sure no code is actively inspecting the method to avoid
  // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
  // disable OSR when single stepping, but that's currently hard to know at this point.
  if (Runtime::Current()->GetInstrumentation()->InterpreterStubsInstalled() ||
      Runtime::Current()->GetInstrumentation()->IsDeoptimized(method) ||
      thread->IsForceInterpreter() ||
      method->GetDeclaringClass()->IsObsoleteObject() ||
      Dbg::IsForcedInterpreterNeededForUpcall(thread, method) ||
      Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
    return false;
  }

  ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
  OsrData* osr_data = jit->PrepareForOsr(method,
                                         dex_pc + dex_pc_offset,
                                         shadow_frame->GetVRegArgs(0));

  if (osr_data == nullptr) {
    return false;
  }

  {
    thread->PopShadowFrame();
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(osr_data->memory,
                          osr_data->frame_size,
                          osr_data->native_pc,
                          result,
                          method->GetShorty(),
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(osr_data);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

void Jit::NotifyZygoteCompilationDone() {
  if (fd_methods_ == -1) {
    return;
  }

  size_t offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
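    // For example, with 4 KiB pages, a methods section spanning image offsets
    // [0x1234, 0x8000) yields the shareable page range [0x2000, 0x8000); bytes in
    // [0x1234, 0x2000) stay private to each process.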
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
      offset += capacity;
    }
  }

  // Do an msync to ensure we are not affected by writes still being in caches.
  if (msync(zygote_mapping_methods_.Begin(), fd_methods_size_, MS_SYNC) != 0) {
    PLOG(WARNING) << "Failed to sync boot image methods memory";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // We don't need the shared mapping anymore; drop it now, since sealing writes below
  // fails while a shared writable mapping still exists.
  zygote_mapping_methods_ = MemMap::Invalid();

  // Seal writes now. Zygote and children will map the memory private in order
  // to write to it.
  if (fcntl(fd_methods_, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_WRITE) == -1) {
    PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // Ensure the contents are the same as before: there was a window between
  // the memcpy and the sealing where other processes could have changed the
  // contents.
  // Note this would not be needed if we could have used F_SEAL_FUTURE_WRITE,
  // see b/143833776.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
        LOG(WARNING) << "Contents differ in boot image methods data";
        code_cache_->GetZygoteMap()->SetCompilationState(
            ZygoteCompilationState::kNotifiedFailure);
        return;
      }
      offset += capacity;
    }
  }

  // Future spawned processes don't need the fd anymore.
  fd_methods_.reset();

  // In order to have the zygote and children share the memory, we also remap
  // the memory into the zygote process.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (mremap(child_mapping_methods.Begin() + offset,
                 capacity,
                 capacity,
                 MREMAP_FIXED | MREMAP_MAYMOVE,
                 page_start) == MAP_FAILED) {
        // Failing to remap is safe as the process will just use the old
        // contents.
        PLOG(WARNING) << "Failed mremap of boot image methods of " << space->GetImageFilename();
      }
      offset += capacity;
    }
  }

  LOG(INFO) << "Successfully notified child processes on sharing boot image methods";

  // Mark that compilation of boot classpath is done, and memory can now be
  // shared. Other processes will pick up this information.
  code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedOk);

  // The private mapping created for this process has been mremapped. We can
  // reset it.
  child_mapping_methods.Reset();
}

class JitCompileTask final : public Task {
 public:
  enum class TaskKind {
    kCompile,
    kPreCompile,
  };

  JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind)
      : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) {
    ScopedObjectAccess soa(Thread::Current());
    // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
    // until compilation is done.
    // When we precompile, this is either with boot classpath methods, or main
    // class loader methods, so we don't need to keep a global reference.
    if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
        kind_ != TaskKind::kPreCompile) {
      klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
      CHECK(klass_ != nullptr);
    }
  }

  ~JitCompileTask() {
    if (klass_ != nullptr) {
      ScopedObjectAccess soa(Thread::Current());
      soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
    }
  }

  void Run(Thread* self) override {
    {
      ScopedObjectAccess soa(self);
      switch (kind_) {
        case TaskKind::kCompile:
        case TaskKind::kPreCompile: {
          Runtime::Current()->GetJit()->CompileMethod(
              method_,
              self,
              compilation_kind_,
              /* prejit= */ (kind_ == TaskKind::kPreCompile));
          break;
        }
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() override {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  const CompilationKind compilation_kind_;
  jobject klass_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

static std::string GetProfileFile(const std::string& dex_location) {
  // Hardcoded assumption about where the profile file is.
  // TODO(ngeoffray): this is brittle and we would need to change this if we
  // wanted to do more eager JITting of methods in a profile. This is
  // currently only for system server.
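  // For example, "/system/framework/services.jar" yields
  // "/system/framework/services.jar.prof".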
  return dex_location + ".prof";
}

static std::string GetBootProfileFile(const std::string& profile) {
  // The boot profile can be found next to the compilation profile, with a
  // different extension.
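  // For example, "services.jar.prof" maps to "services.jar.bprof".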
  return ReplaceFileExtension(profile, "bprof");
}

/**
 * A JIT task to run after all profile compilation is done.
 */
class JitDoneCompilingProfileTask final : public SelfDeletingTask {
 public:
  explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
      : dex_files_(dex_files) {}

  void Run(Thread* self ATTRIBUTE_UNUSED) override {
    // Madvise DONTNEED dex files now that we're done compiling methods.
    for (const DexFile* dex_file : dex_files_) {
      if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
        int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), kPageSize)),
                             RoundUp(dex_file->Size(), kPageSize),
                             MADV_DONTNEED);
        if (result == -1) {
          PLOG(WARNING) << "Madvise failed";
        }
      }
    }
  }

 private:
  std::vector<const DexFile*> dex_files_;

  DISALLOW_COPY_AND_ASSIGN(JitDoneCompilingProfileTask);
};

class JitZygoteDoneCompilingTask final : public SelfDeletingTask {
 public:
  JitZygoteDoneCompilingTask() {}

  void Run(Thread* self ATTRIBUTE_UNUSED) override {
    DCHECK(Runtime::Current()->IsZygote());
    Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
        ZygoteCompilationState::kDone);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(JitZygoteDoneCompilingTask);
};

/**
 * A JIT task to run Java verification of boot classpath classes that were not
 * verified at compile-time.
 */
class ZygoteVerificationTask final : public Task {
 public:
  ZygoteVerificationTask() {}

  void Run(Thread* self) override {
    // We are going to load classes and run verification, which may also need to load
    // more classes. If the thread cannot load classes (typically when the runtime is
    // debuggable), then just return.
    if (!self->CanLoadClasses()) {
      return;
    }
    Runtime* runtime = Runtime::Current();
    ClassLinker* linker = runtime->GetClassLinker();
    const std::vector<const DexFile*>& boot_class_path =
        runtime->GetClassLinker()->GetBootClassPath();
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
    uint64_t start_ns = ThreadCpuNanoTime();
    uint64_t number_of_classes = 0;
    for (const DexFile* dex_file : boot_class_path) {
      if (dex_file->GetOatDexFile() != nullptr &&
          dex_file->GetOatDexFile()->GetOatFile() != nullptr) {
        // If backed by an .oat file, we have already run verification at
        // compile-time. Note that some classes may still have failed
        // verification there if they reference updatable mainline module
        // classes.
        continue;
      }
      for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
        const dex::ClassDef& class_def = dex_file->GetClassDef(i);
        const char* descriptor = dex_file->GetClassDescriptor(class_def);
        ScopedNullHandle<mirror::ClassLoader> null_loader;
        klass.Assign(linker->FindClass(self, descriptor, null_loader));
        if (klass == nullptr) {
          self->ClearException();
          LOG(WARNING) << "Could not find " << descriptor;
          continue;
        }
        if (linker->VerifyClass(self, /* verifier_deps= */ nullptr, klass) ==
                verifier::FailureKind::kHardFailure) {
          CHECK(self->IsExceptionPending());
          LOG(WARNING) << "Methods in the boot classpath failed to verify: "
                       << self->GetException()->Dump();
          self->ClearException();
        } else {
          ++number_of_classes;
        }
        CHECK(!self->IsExceptionPending());
      }
    }
    LOG(INFO) << "Verified "
              << number_of_classes
              << " classes from mainline modules in "
              << PrettyDuration(ThreadCpuNanoTime() - start_ns);
  }
};

class ZygoteTask final : public Task {
 public:
  ZygoteTask() {}

  void Run(Thread* self) override {
    Runtime* runtime = Runtime::Current();
    uint32_t added_to_queue = 0;
    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
      const std::vector<const DexFile*>& boot_class_path =
          runtime->GetClassLinker()->GetBootClassPath();
      ScopedNullHandle<mirror::ClassLoader> null_handle;
      // We avoid doing compilation at boot for the secondary zygote, as apps forked from it
      // are not critical for boot.
      if (Runtime::Current()->IsPrimaryZygote()) {
        for (const std::string& profile_file : space->GetProfileFiles()) {
          std::string boot_profile = GetBootProfileFile(profile_file);
          LOG(INFO) << "JIT Zygote looking at boot profile " << boot_profile;

          // We add to the queue for zygote so that we can fork processes in-between
          // compilations.
          added_to_queue += runtime->GetJit()->CompileMethodsFromBootProfile(
              self, boot_class_path, boot_profile, null_handle, /* add_to_queue= */ true);
        }
      }
      for (const std::string& profile_file : space->GetProfileFiles()) {
        LOG(INFO) << "JIT Zygote looking at profile " << profile_file;

        added_to_queue += runtime->GetJit()->CompileMethodsFromProfile(
            self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
      }
    }
    DCHECK(runtime->GetJit()->InZygoteUsingJit());
    runtime->GetJit()->AddPostBootTask(self, new JitZygoteDoneCompilingTask());

    JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
    code_cache->GetZygoteMap()->Initialize(added_to_queue);
  }

  void Finalize() override {
    delete this;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
};

class JitProfileTask final : public Task {
 public:
  JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                 jobject class_loader) {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader>(class_loader)));
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (const auto& dex_file : dex_files) {
      dex_files_.push_back(dex_file.get());
      // Register the dex file so that we can guarantee it doesn't get deleted
      // while reading it during the task.
      class_linker->RegisterDexFile(*dex_file.get(), h_loader.Get());
    }
    // We also create our own global ref to use this class loader later.
    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), h_loader.Get());
  }

  void Run(Thread* self) override {
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
        soa.Decode<mirror::ClassLoader>(class_loader_));

    std::string profile = GetProfileFile(dex_files_[0]->GetLocation());
    std::string boot_profile = GetBootProfileFile(profile);

    Jit* jit = Runtime::Current()->GetJit();

    jit->CompileMethodsFromBootProfile(
        self,
        dex_files_,
        boot_profile,
        loader,
        /* add_to_queue= */ false);

    jit->CompileMethodsFromProfile(
        self,
        dex_files_,
        profile,
        loader,
        /* add_to_queue= */ true);
  }

  void Finalize() override {
    delete this;
  }

  ~JitProfileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), class_loader_);
  }

 private:
  std::vector<const DexFile*> dex_files_;
  jobject class_loader_;

  DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
};

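// Copies `n` bytes from `s2` to `s1` only when the contents differ; presumably this avoids
// dirtying a copy-on-write page when the destination already holds the same data.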
static void CopyIfDifferent(void* s1, const void* s2, size_t n) {
  if (memcmp(s1, s2, n) != 0) {
    memcpy(s1, s2, n);
  }
}

void Jit::MapBootImageMethods() {
  if (Runtime::Current()->IsJavaDebuggable()) {
    LOG(INFO) << "Not mapping boot image methods due to process being debuggable";
    return;
  }
  CHECK_NE(fd_methods_.get(), -1);
  if (!code_cache_->GetZygoteMap()->CanMapBootImageMethods()) {
    LOG(WARNING) << "Not mapping boot image methods due to error from zygote";
    // We don't need the fd anymore.
    fd_methods_.reset();
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  // We don't need the fd anymore.
  fd_methods_.reset();

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    return;
  }
  // We are going to mremap the child mapping into the image:
  //
  //                  ImageSection      ChildMappingMethods
  //
  // section start -->  -----------
  //                    |         |
  //                    |         |
  // page_start    -->  |         |  <-----   -----------
  //                    |         |           |         |
  //                    |         |           |         |
  //                    |         |           |         |
  //                    |         |           |         |
  //                    |         |           |         |
  //                    |         |           |         |
  //                    |         |           |         |
  // page_end      -->  |         |  <-----   -----------
  //                    |         |
  // section end   -->  -----------
  //
  size_t offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end <= page_start) {
      // The section doesn't span a whole aligned page.
      continue;
    }
    uint64_t capacity = page_end - page_start;
    // Walk over methods in the boot image, and check for:
    // 1) methods whose class is not initialized in this process, but is in the
    //    zygote process. For such methods, we need their entrypoints to be stubs
    //    that do the initialization check.
    // 2) native methods whose data pointer is different than the one in the
    //    zygote. Such methods may have had a custom native implementation provided
    //    by JNI RegisterNatives.
    header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
      // Methods in the boot image should never have their single
      // implementation flag set (and therefore never have a `data_` pointing
      // to an ArtMethod for single implementation).
      CHECK(method.IsIntrinsic() || !method.HasSingleImplementationFlag());
      if (method.IsRuntimeMethod()) {
        return;
      }

      // Pointer to the method we're currently using.
      uint8_t* pointer = reinterpret_cast<uint8_t*>(&method);
      // The data pointer of that method that we want to keep.
      uint8_t* data_pointer = pointer + ArtMethod::DataOffset(kRuntimePointerSize).Int32Value();
      if (method.IsNative() && data_pointer >= page_start && data_pointer < page_end) {
        // The data pointer of the ArtMethod in the shared memory we are going to remap into our
        // own mapping. This is the data that we will see after the remap.
        uint8_t* new_data_pointer =
            child_mapping_methods.Begin() + offset + (data_pointer - page_start);
        CopyIfDifferent(new_data_pointer, data_pointer, sizeof(void*));
      }

      // The entrypoint of the method we're currently using and that we want to
      // keep.
      uint8_t* entry_point_pointer = pointer +
          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kRuntimePointerSize).Int32Value();
      if (!method.GetDeclaringClassUnchecked()->IsVisiblyInitialized() &&
          method.IsStatic() &&
          !method.IsConstructor() &&
          entry_point_pointer >= page_start &&
          entry_point_pointer < page_end) {
        // The entry point of the ArtMethod in the shared memory we are going to remap into our
        // own mapping. This is the entrypoint that we will see after the remap.
        uint8_t* new_entry_point_pointer =
            child_mapping_methods.Begin() + offset + (entry_point_pointer - page_start);
        CopyIfDifferent(new_entry_point_pointer, entry_point_pointer, sizeof(void*));
      }
    }, space->Begin(), kRuntimePointerSize);

    // Map the memory in the boot image range.
    if (mremap(child_mapping_methods.Begin() + offset,
               capacity,
               capacity,
               MREMAP_FIXED | MREMAP_MAYMOVE,
               page_start) == MAP_FAILED) {
      PLOG(WARNING) << "Failed to mremap boot image methods for " << space->GetImageFilename();
    }
    offset += capacity;
  }

  // The private mapping created for this process has been mremapped. We can
  // reset it.
  child_mapping_methods.Reset();
  LOG(INFO) << "Successfully mapped boot image methods";
}

bool Jit::InZygoteUsingJit() {
  Runtime* runtime = Runtime::Current();
  return runtime->IsZygote() && runtime->HasImageWithProfile() && runtime->UseJitCompilation();
}

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.

  // We need peers as we may report the JIT thread, e.g., in the debugger.
  constexpr bool kJitPoolNeedsPeers = true;
  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));

  Runtime* runtime = Runtime::Current();
  thread_pool_->SetPthreadPriority(
      runtime->IsZygote()
          ? options_->GetZygoteThreadPoolPthreadPriority()
          : options_->GetThreadPoolPthreadPriority());
  Start();

  if (runtime->IsZygote()) {
    // To speed up class lookups, generate a type lookup table for
    // dex files not backed by an oat file.
    for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
      if (dex_file->GetOatDexFile() == nullptr) {
        TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
        type_lookup_tables_.push_back(
            std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
        dex_file->SetOatDexFile(type_lookup_tables_.back().get());
      }
    }

    // Add a task that will verify boot classpath jars that were not
    // pre-compiled.
    thread_pool_->AddTask(Thread::Current(), new ZygoteVerificationTask());
  }

  if (InZygoteUsingJit()) {
    // If we have an image with a profile, request a JIT task to
    // compile all methods in that profile.
    thread_pool_->AddTask(Thread::Current(), new ZygoteTask());

    // And create mappings to share boot image methods memory from the zygote to
    // child processes.

    // Compute the total capacity required for the boot image methods.
    uint64_t total_capacity = 0;
    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
      const ImageHeader& header = space->GetImageHeader();
      const ImageSection& section = header.GetMethodsSection();
      // Mappings need to be at the page level.
      uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
      uint8_t* page_end =
          AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
      if (page_end > page_start) {
        total_capacity += (page_end - page_start);
      }
    }

    // Create the child and zygote mappings to the boot image methods.
    if (total_capacity > 0) {
      // Start with '/boot' and end with '.art' to match the pattern recognized
      // by android_os_Debug.cpp for boot images.
      const char* name = "/boot-image-methods.art";
      unique_fd mem_fd =
          unique_fd(art::memfd_create(name, /* flags= */ MFD_ALLOW_SEALING | MFD_CLOEXEC));
      if (mem_fd.get() == -1) {
        PLOG(WARNING) << "Could not create boot image methods file descriptor";
        return;
      }
      if (ftruncate(mem_fd.get(), total_capacity) != 0) {
        PLOG(WARNING) << "Failed to truncate boot image methods file to " << total_capacity;
        return;
      }
      std::string error_str;

      // Create the shared mapping eagerly, as this prevents other processes
      // from adding the writable seal.
      zygote_mapping_methods_ = MemMap::MapFile(
          total_capacity,
          PROT_READ | PROT_WRITE,
          MAP_SHARED,
          mem_fd,
          /* start= */ 0,
          /* low_4gb= */ false,
          "boot-image-methods",
          &error_str);

      if (!zygote_mapping_methods_.IsValid()) {
        LOG(WARNING) << "Failed to create zygote mapping of boot image methods: " << error_str;
        return;
      }
      if (zygote_mapping_methods_.MadviseDontFork() != 0) {
        LOG(WARNING) << "Failed to madvise dont fork boot image methods";
        zygote_mapping_methods_ = MemMap();
        return;
      }

      // We should use the F_SEAL_FUTURE_WRITE flag, but this has unexpected
      // behavior on private mappings after fork (the mapping becomes shared between
      // parent and children), see b/143833776.
      // We will seal the write once we are done writing to the shared mapping.
      if (fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) == -1) {
        PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
        zygote_mapping_methods_ = MemMap();
        return;
      }
      fd_methods_ = unique_fd(mem_fd.release());
      fd_methods_size_ = total_capacity;
    }
  }
}

void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                           jobject class_loader) {
  if (dex_files.empty()) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  // If the runtime is debuggable, no need to precompile methods.
  if (runtime->IsSystemServer() &&
      UseJitCompilation() &&
      options_->UseProfiledJitCompilation() &&
      runtime->HasImageWithProfile() &&
      !runtime->IsJavaDebuggable()) {
    thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
  }
}

bool Jit::CompileMethodFromProfile(Thread* self,
                                   ClassLinker* class_linker,
                                   uint32_t method_idx,
                                   Handle<mirror::DexCache> dex_cache,
                                   Handle<mirror::ClassLoader> class_loader,
                                   bool add_to_queue,
                                   bool compile_after_boot) {
  ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
      method_idx, dex_cache, class_loader);
  if (method == nullptr) {
    self->ClearException();
    return false;
  }
  if (!method->IsCompilable() || !method->IsInvokable()) {
    return false;
  }
  if (method->IsPreCompiled()) {
    // Already seen by another profile.
    return false;
  }
  const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
  if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
      class_linker->IsQuickGenericJniStub(entry_point) ||
      (entry_point == interpreter::GetNterpEntryPoint()) ||
      // We explicitly check for the stub. The trampoline is for methods backed by
      // a .oat file that has a compiled version of the method.
      (entry_point == GetQuickResolutionStub())) {
    VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
              << " from profile";
    method->SetPreCompiled();
    if (!add_to_queue) {
      CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true);
    } else {
      Task* task = new JitCompileTask(
          method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized);
      if (compile_after_boot) {
        AddPostBootTask(self, task);
      } else {
        thread_pool_->AddTask(self, task);
      }
      return true;
    }
  }
  return false;
}

uint32_t Jit::CompileMethodsFromBootProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {
  unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No boot profile: " << profile_file;
    return 0u;
  }

  ProfileBootInfo profile_info;
  if (!profile_info.Load(profile.Fd(), dex_files)) {
    LOG(ERROR) << "Could not load profile file: " << profile_file;
    return 0u;
  }

  ScopedObjectAccess soa(self);
  VariableSizedHandleScope handles(self);
  std::vector<Handle<mirror::DexCache>> dex_caches;
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  for (const DexFile* dex_file : profile_info.GetDexFiles()) {
    dex_caches.push_back(handles.NewHandle(class_linker->FindDexCache(self, *dex_file)));
  }

  uint32_t added_to_queue = 0;
  for (const std::pair<uint32_t, uint32_t>& pair : profile_info.GetMethods()) {
    if (CompileMethodFromProfile(self,
                                 class_linker,
                                 pair.second,
                                 dex_caches[pair.first],
                                 class_loader,
                                 add_to_queue,
                                 /*compile_after_boot=*/false)) {
      ++added_to_queue;
    }
  }
  return added_to_queue;
}

uint32_t Jit::CompileMethodsFromProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {
  if (profile_file.empty()) {
    LOG(WARNING) << "Expected a profile file in JIT zygote mode";
    return 0u;
  }

  // We don't generate boot profiles on device, therefore we don't
  // need to lock the file.
  unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No profile: " << profile_file;
    return 0u;
  }

  ProfileCompilationInfo profile_info(/* for_boot_image= */ class_loader.IsNull());
  if (!profile_info.Load(profile.Fd())) {
    LOG(ERROR) << "Could not load profile file";
    return 0u;
  }
  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  uint32_t added_to_queue = 0u;
  for (const DexFile* dex_file : dex_files) {
    std::set<dex::TypeIndex> class_types;
    std::set<uint16_t> all_methods;
    if (!profile_info.GetClassesAndMethods(*dex_file,
                                           &class_types,
                                           &all_methods,
                                           &all_methods,
                                           &all_methods)) {
      // This means the profile file did not reference the dex file, which is the case
      // if there are no classes or methods of that dex file in the profile.
      continue;
    }
    dex_cache.Assign(class_linker->FindDexCache(self, *dex_file));
    CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();

    for (uint16_t method_idx : all_methods) {
      if (CompileMethodFromProfile(self,
                                   class_linker,
                                   method_idx,
                                   dex_cache,
                                   class_loader,
                                   add_to_queue,
                                   /*compile_after_boot=*/true)) {
        ++added_to_queue;
      }
    }
  }

  // Add a task to run when all compilation is done.
  AddPostBootTask(self, new JitDoneCompilingProfileTask(dex_files));
  return added_to_queue;
}

bool Jit::IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsClassInitializer() || !method->IsCompilable()) {
    // We do not want to compile such methods.
    return true;
  }
  if (method->IsNative()) {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
    if (klass == GetClassRoot<mirror::MethodHandle>() ||
        klass == GetClassRoot<mirror::VarHandle>()) {
      // MethodHandle and VarHandle invocation methods are required to throw an
      // UnsupportedOperationException if invoked reflectively. We achieve this by having native
      // implementations that raise the exception. We need to disable JIT compilation of these JNI
      // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
      // stubs. Since these stubs have different stack representations we can then crash in stack
      // walking (b/78151261).
      return true;
    }
  }
  return false;
}
1465
EnqueueOptimizedCompilation(ArtMethod * method,Thread * self)1466 void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
1467 // Reset the hotness counter so the baseline compiled code doesn't call this
1468 // method repeatedly.
1469 GetCodeCache()->ResetHotnessCounter(method, self);
1470
1471 if (thread_pool_ == nullptr) {
1472 return;
1473 }
1474 // We arrive here after a baseline compiled code has reached its baseline
1475 // hotness threshold. If we're not only using the baseline compiler, enqueue a compilation
1476 // task that will compile optimize the method.
1477 if (!options_->UseBaselineCompiler()) {
1478 thread_pool_->AddTask(
1479 self,
1480 new JitCompileTask(method,
1481 JitCompileTask::TaskKind::kCompile,
1482 CompilationKind::kOptimized));
1483 }
1484 }
1485
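// Scoped helper that marks the current thread as a runtime thread and restores the previous
// state when it goes out of scope.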
class ScopedSetRuntimeThread {
 public:
  explicit ScopedSetRuntimeThread(Thread* self)
      : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
    self_->SetIsRuntimeThread(true);
  }

  ~ScopedSetRuntimeThread() {
    self_->SetIsRuntimeThread(was_runtime_thread_);
  }

 private:
  Thread* self_;
  bool was_runtime_thread_;
};

void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
    ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    if (np_method->IsCompilable()) {
      // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
      // conflicts with jitzygote optimizations.
      JitCompileTask compile_task(
          method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized);
      // Fake being in a runtime thread so that class-load behavior will be the same as for
      // the normal JIT.
      ScopedSetRuntimeThread ssrt(thread);
      compile_task.Run(thread);
    }
    return;
  }

  AddSamples(thread, method);
}

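// Block until the JIT thread pool has run all queued compilation tasks; the workers keep
// running afterwards (Stop() is what halts them).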
void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}

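// A minimal usage sketch:
//
//   {
//     ScopedJitSuspend suspend;  // Stops the JIT thread pool, if it was running.
//     ...                        // Work that must not race with JIT compilation.
//   }                            // Restarts the JIT if it was on before.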
ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}

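// Entry point of the thread created in PostForkChildAction: it polls until the zygote signals
// that boot image compilation is done, then attaches to the runtime and maps the updated boot
// image methods into this process.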
static void* RunPollingThread(void* arg) {
  Jit* jit = reinterpret_cast<Jit*>(arg);
  do {
    sleep(10);
  } while (!jit->GetCodeCache()->GetZygoteMap()->IsCompilationNotified());

  // We will suspend other threads: we can only do that if we're attached to the
  // runtime.
  Runtime* runtime = Runtime::Current();
  bool thread_attached = runtime->AttachCurrentThread(
      "BootImagePollingThread",
      /* as_daemon= */ true,
      /* thread_group= */ nullptr,
      /* create_peer= */ false);
  CHECK(thread_attached);

  {
    // Prevent other threads from running while we are remapping the boot image
    // ArtMethods. Native threads might still be running, but they cannot
    // change the contents of ArtMethods.
    ScopedSuspendAll ssa(__FUNCTION__);
    runtime->GetJit()->MapBootImageMethods();
  }

  Runtime::Current()->DetachCurrentThread();
  return nullptr;
}

void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
  // Clear the potential boot tasks inherited from the zygote.
  {
    MutexLock mu(Thread::Current(), boot_completed_lock_);
    tasks_after_boot_.clear();
  }

  Runtime* const runtime = Runtime::Current();
  // Check if we'll need to remap the boot image methods.
  if (!is_zygote && fd_methods_ != -1) {
    // Create a thread that will poll the status of zygote compilation, and map
    // the private mapping of boot image methods.
    // For a child zygote, we instead query IsCompilationNotified() post zygote fork.
    zygote_mapping_methods_.ResetInForkedProcess();
    pthread_t polling_thread;
    pthread_attr_t attr;
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(
        pthread_create,
        (&polling_thread, &attr, RunPollingThread, reinterpret_cast<void*>(this)),
        "Methods maps thread");
  }

  if (is_zygote || runtime->IsSafeMode()) {
    // Delete the thread pool, we are not going to JIT.
    thread_pool_.reset(nullptr);
    return;
  }
  // At this point, the compiler options have been adjusted to the particular configuration
  // of the forked child. Parse them again.
  jit_compiler_->ParseCompilerOptions();

  // Adjust the status of code cache collection: the status from the zygote was to not collect.
  // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should work
  // in theory, it is causing deadlocks in some jvmti tests related to JIT GC. Hence, JIT GC is
  // disabled for now (b/147208992).
  code_cache_->SetGarbageCollectCode(
      !jit_compiler_->GenerateDebugInfo() &&
      !runtime->GetInstrumentation()->AreExitStubsInstalled() &&
      !JitAtFirstUse());

  if (is_system_server && runtime->HasImageWithProfile()) {
    // Disable garbage collection: we don't want it to delete methods we're compiling
    // through boot and system server profiles.
    // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
    code_cache_->SetGarbageCollectCode(false);
  }

  // We do this here instead of PostZygoteFork, as NativeDebugInfoPostFork only
  // applies to a child.
  NativeDebugInfoPostFork();
}

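// The JIT worker threads are torn down before forking and recreated afterwards in
// PostZygoteFork(), so no JIT worker is running while the fork happens.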
void Jit::PreZygoteFork() {
  if (thread_pool_ == nullptr) {
    return;
  }
  thread_pool_->DeleteThreads();

  NativeDebugInfoPreFork();
}

void Jit::PostZygoteFork() {
  Runtime* runtime = Runtime::Current();
  if (thread_pool_ == nullptr) {
    // If this is a child zygote, check if we need to remap the boot image
    // methods.
    if (runtime->IsZygote() &&
        fd_methods_ != -1 &&
        code_cache_->GetZygoteMap()->IsCompilationNotified()) {
      ScopedSuspendAll ssa(__FUNCTION__);
      MapBootImageMethods();
    }
    return;
  }
  if (runtime->IsZygote() && code_cache_->GetZygoteMap()->IsCompilationDoneButNotNotified()) {
    // Copy the boot image methods data to the mappings we created to share
    // with the children. We do this here as we are the only thread running and
    // we don't risk other threads concurrently updating the ArtMethods.
    CHECK_EQ(GetTaskCount(), 1);
    NotifyZygoteCompilationDone();
    CHECK(code_cache_->GetZygoteMap()->IsCompilationNotified());
  }
  thread_pool_->CreateThreads();
  thread_pool_->SetPthreadPriority(
      runtime->IsZygote()
          ? options_->GetZygoteThreadPoolPthreadPriority()
          : options_->GetThreadPoolPthreadPriority());
}

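// Tasks added before boot completes are deferred in |tasks_after_boot_| and flushed to the
// thread pool by BootCompleted().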
void Jit::AddPostBootTask(Thread* self, Task* task) {
  MutexLock mu(self, boot_completed_lock_);
  if (boot_completed_) {
    thread_pool_->AddTask(self, task);
  } else {
    tasks_after_boot_.push_back(task);
  }
}

void Jit::BootCompleted() {
  Thread* self = Thread::Current();
  std::deque<Task*> tasks;
  {
    MutexLock mu(self, boot_completed_lock_);
    tasks = std::move(tasks_after_boot_);
    boot_completed_ = true;
  }
  for (Task* task : tasks) {
    thread_pool_->AddTask(self, task);
  }
}

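// For the shared (zygote) region, only boot image objects can be encoded: code compiled there
// may be used by any process, so references must be to objects all of them share.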
bool Jit::CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const {
  return !is_for_shared_region ||
         Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(method->GetDeclaringClass());
}

bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls);
}

bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string);
}

bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  if (!is_for_shared_region) {
    return cls->IsInitialized();
  } else {
    // Look up the class status in the oat file.
    const DexFile& dex_file = *cls->GetDexCache()->GetDexFile();
    const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
    // In case we run without an image there won't be a backing oat file.
    if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
      return false;
    }
    uint16_t class_def_index = cls->GetDexClassDefIndex();
    return oat_dex_file->GetOatClass(class_def_index).GetStatus() >= ClassStatus::kInitialized;
  }
}

void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
  if (thread_pool_ == nullptr) {
    return;
  }

  if (JitAtFirstUse()) {
    // Tests might request JIT on first use (compiled synchronously in the interpreter).
    return;
  }

  if (!UseJitCompilation()) {
    return;
  }

  if (IgnoreSamplesForMethod(method)) {
    return;
  }

  if (GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
      // If we already have compiled code for it, nterp may be stuck in a loop.
      // Compile OSR.
      thread_pool_->AddTask(
          self,
          new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
    }
    return;
  }

  // Check if we have precompiled this method.
  if (UNLIKELY(method->IsPreCompiled())) {
    if (!NeedsClinitCheckBeforeCall(method) ||
        method->GetDeclaringClass()->IsVisiblyInitialized()) {
      const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
      if (entry_point != nullptr) {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
      }
    }
    return;
  }

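  // Memory-shared methods get an extra per-method countdown on top of the global hotness
  // threshold: a compilation is enqueued roughly once every
  // kIndividualSharedMethodHotnessThreshold + 1 samples that reach this point.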
  static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0xff;
  if (method->IsMemorySharedMethod()) {
    MutexLock mu(self, lock_);
    auto it = shared_method_counters_.find(method);
    if (it == shared_method_counters_.end()) {
      shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
      return;
    } else if (it->second != 0) {
      DCHECK_LE(it->second, kIndividualSharedMethodHotnessThreshold);
      shared_method_counters_[method] = it->second - 1;
      return;
    } else {
      shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
    }
  }

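  // Methods that can hold profiling info are compiled with the baseline compiler first;
  // otherwise, compile directly with the optimizing compiler.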
  if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
    thread_pool_->AddTask(
        self,
        new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
  } else {
    thread_pool_->AddTask(
        self,
        new JitCompileTask(method,
                           JitCompileTask::TaskKind::kCompile,
                           CompilationKind::kOptimized));
  }
}

}  // namespace jit
}  // namespace art