/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/memfd.h"
#include "base/memory_tool.h"
#include "base/runtime_debug.h"
#include "base/scoped_flock.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "compilation_kind.h"
#include "debugger.h"
#include "dex/type_lookup_table.h"
#include "gc/space/image_space.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "image-inl.h"
#include "interpreter/interpreter.h"
#include "jit-inl.h"
#include "jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/method_handle_impl.h"
#include "mirror/var_handle.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "profile/profile_boot_info.h"
#include "profile/profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art {
namespace jit {

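// Gates the on-stack replacement (OSR) path below: when false, PrepareForOsr
// bails out immediately and interpreted loops are never switched to compiled
// code mid-execution.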
static constexpr bool kEnableOnStackReplacement = true;

// Maximum permitted threshold value.
static constexpr uint32_t kJitMaxThreshold = std::numeric_limits<uint16_t>::max();

static constexpr uint32_t kJitDefaultOptimizeThreshold = 0xffff;
// Different optimization threshold constants. These default to the equivalent optimization
// thresholds divided by 2, but can be overridden at the command-line.
static constexpr uint32_t kJitStressDefaultOptimizeThreshold = kJitDefaultOptimizeThreshold / 2;
static constexpr uint32_t kJitSlowStressDefaultOptimizeThreshold =
    kJitStressDefaultOptimizeThreshold / 2;

static constexpr uint32_t kJitDefaultWarmupThreshold = 0xffff;
// Different warm-up threshold constants. These default to the equivalent warmup thresholds divided
// by 2, but can be overridden at the command-line.
static constexpr uint32_t kJitStressDefaultWarmupThreshold = kJitDefaultWarmupThreshold / 2;
static constexpr uint32_t kJitSlowStressDefaultWarmupThreshold =
    kJitStressDefaultWarmupThreshold / 2;

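// Runtime-debug flag: in debug builds, selects the slow-stress thresholds above.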
DEFINE_RUNTIME_DEBUG_FLAG(Jit, kSlowMode);

// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
JitCompilerInterface* Jit::jit_compiler_ = nullptr;
JitCompilerInterface* (*Jit::jit_load_)(void) = nullptr;

JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
  jit_options->use_profiled_jit_compilation_ =
      options.GetOrDefault(RuntimeArgumentMap::UseProfiledJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->profile_saver_options_ =
      options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
  jit_options->thread_pool_pthread_priority_ =
      options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
  jit_options->zygote_thread_pool_pthread_priority_ =
      options.GetOrDefault(RuntimeArgumentMap::JITZygotePoolThreadPthreadPriority);

  // Set default optimize threshold to aid with checking defaults.
  jit_options->optimize_threshold_ =
      kIsDebugBuild
          ? (Jit::kSlowMode
                 ? kJitSlowStressDefaultOptimizeThreshold
                 : kJitStressDefaultOptimizeThreshold)
          : kJitDefaultOptimizeThreshold;

  // Set default warm-up threshold to aid with checking defaults.
  jit_options->warmup_threshold_ =
      kIsDebugBuild ? (Jit::kSlowMode
                           ? kJitSlowStressDefaultWarmupThreshold
                           : kJitStressDefaultWarmupThreshold)
                    : kJitDefaultWarmupThreshold;

  if (options.Exists(RuntimeArgumentMap::JITOptimizeThreshold)) {
    jit_options->optimize_threshold_ = *options.Get(RuntimeArgumentMap::JITOptimizeThreshold);
  }
  DCHECK_LE(jit_options->optimize_threshold_, kJitMaxThreshold);

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
  }
  DCHECK_LE(jit_options->warmup_threshold_, kJitMaxThreshold);

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
    : code_cache_(code_cache),
      options_(options),
      boot_completed_lock_("Jit::boot_completed_lock_"),
      cumulative_timings_("JIT timings"),
      memory_use_("Memory used for compilation", 16),
      lock_("JIT memory use lock"),
      zygote_mapping_methods_(),
      fd_methods_(-1),
      fd_methods_size_(0) {}

Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
  if (jit_load_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: library not loaded";
    return nullptr;
  }
  jit_compiler_ = (jit_load_)();
  if (jit_compiler_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
    return nullptr;
  }
  std::unique_ptr<Jit> jit(new Jit(code_cache, options));

  // If the code collector is enabled, check if that still holds:
  // With 'perf', we want a 1-1 mapping between an address and a method.
  // We aren't able to keep method pointers live during the instrumentation method entry trampoline
  // so we will just disable jit-gc if we are doing that.
  // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should work
  // in theory it is causing deadlocks in some jvmti tests related to Jit GC. Hence, disabling
  // Jit GC for now (b/147208992).
  if (code_cache->GetGarbageCollectCode()) {
    code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
                                      !jit->JitAtFirstUse());
  }

  VLOG(jit) << "JIT created with initial_capacity="
      << PrettySize(options->GetCodeCacheInitialCapacity())
      << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
      << ", warmup_threshold=" << options->GetWarmupThreshold()
      << ", optimize_threshold=" << options->GetOptimizeThreshold()
      << ", profile_saver_options=" << options->GetProfileSaverOptions();

  // We want to know whether the compiler is compiling baseline, as this
  // affects how we GC ProfilingInfos.
  for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
    if (option == "--baseline") {
      options->SetUseBaselineCompiler();
      break;
    }
  }

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

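// Resolves `name` in the already-dlopen'ed JIT compiler library and stores the
// result in *address; fails with an error message if the symbol is missing.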
template <typename T>
bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
  *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
  if (*address == nullptr) {
    *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
    return false;
  }
  return true;
}

bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load libart-compiler.so: " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  if (!LoadSymbol(&jit_load_, "jit_load", error_msg)) {
    dlclose(jit_library_handle_);
    return false;
  }
  return true;
}

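// Runs one JIT compilation of `method` after re-validating that the method is
// still safe and worthwhile to compile (no breakpoints, not deoptimized, not
// obsolete). Expects the (method, kind) pair to already be registered as being
// compiled in the code cache. Returns whether compilation succeeded.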
bool Jit::CompileMethodInternal(ArtMethod* method,
                                Thread* self,
                                CompilationKind compilation_kind,
                                bool prejit) {
  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::jit_lock_);
    CHECK(GetCodeCache()->IsMethodBeingCompiled(method, compilation_kind));
  }
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  // If the baseline flag was explicitly passed in the compiler options, change the compilation kind
  // from optimized to baseline.
  if (jit_compiler_->IsBaselineCompiler() && compilation_kind == CompilationKind::kOptimized) {
    compilation_kind = CompilationKind::kBaseline;
  }

  // If we're asked to compile baseline, but we cannot allocate profiling infos,
  // change the compilation kind to optimized.
  if ((compilation_kind == CompilationKind::kBaseline) &&
      !GetCodeCache()->CanAllocateProfilingInfo()) {
    compilation_kind = CompilationKind::kOptimized;
  }

  // Don't compile the method if it has breakpoints.
  if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " due to not being safe to jit according to runtime-callbacks. For example, there"
              << " could be breakpoints in this method.";
    return false;
  }

  if (!method->IsCompilable()) {
    DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
           method->IsProxyMethod()) << method->PrettyMethod();
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to method being made "
              << "obsolete while waiting for JIT task to run. This probably happened due to "
              << "concurrent structural class redefinition.";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
  if ((compilation_kind == CompilationKind::kOsr) && GetCodeCache()->IsSharedRegion(*region)) {
    VLOG(jit) << "JIT not osr compiling "
              << method->PrettyMethod()
              << " due to using shared region";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, compilation_kind, prejit)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " kind=" << compilation_kind;
  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
  code_cache_->DoneCompiling(method_to_compile, self);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " kind=" << compilation_kind;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::WaitForWorkersToBeCreated() {
  if (thread_pool_ != nullptr) {
    thread_pool_->WaitForWorkersToBeCreated();
  }
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  if (thread_pool_ != nullptr) {
    std::unique_ptr<ThreadPool> pool;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      pool = std::move(thread_pool_);
    }

    // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
    if (!kRunningOnMemoryTool) {
      pool->StopWorkers(self);
      pool->RemoveAllTasks(self);
    }
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding a suspend logic
    // here. Besides, this is only done for shutdown.
    pool->Wait(self, false, false);
  }
}

void Jit::StartProfileSaver(const std::string& profile_filename,
                            const std::vector<std::string>& code_paths,
                            const std::string& ref_profile_filename) {
  if (options_->GetSaveProfilingInfo()) {
    ProfileSaver::Start(options_->GetProfileSaverOptions(),
                        profile_filename,
                        code_cache_,
                        code_paths,
                        ref_profile_filename);
  }
}

void Jit::StopProfileSaver() {
  if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
  }
}

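// A zero hotness threshold means every method is compiled the first time it runs.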
bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

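// Compiled code can only be invoked directly if the method's entry point
// lives in the JIT code cache.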
bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK_IMPLIES(options_->GetSaveProfilingInfo(), !ProfileSaver::IsStarted());
  if (options_->DumpJitInfoOnShutdown()) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_ != nullptr) {
    delete jit_compiler_;
    jit_compiler_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->jit_compiler_->GenerateDebugInfo()) {
    jit_compiler_->TypesLoaded(&type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (jit_compiler_->GenerateDebugInfo()) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_compiler_->TypesLoaded(visitor.classes_.data(), visitor.classes_.size());
  }
}

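// Assembly stub that copies the prepared OSR frame (`stack`) onto the machine
// stack and jumps to `native_pc` inside the compiled code.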
extern "C" void art_quick_osr_stub(void** stack,
                                   size_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);

OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
  if (!kEnableOnStackReplacement) {
    return nullptr;
  }

  // Cheap check if the method has been compiled already. That's an indicator that we should
  // osr into it.
  if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return nullptr;
  }

  // Fetch some data before looking up for an OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  CodeItemDataAccessor accessor(method->DexInstructionData());
  const size_t number_of_vregs = accessor.RegistersSize();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  OsrData* osr_data = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No osr method yet, just return to the interpreter.
      return nullptr;
    }

    CodeInfo code_info(osr_method);

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return nullptr;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
    DCHECK_EQ(vreg_map.size(), number_of_vregs);

    size_t frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to put shadow frame values. The osr stub will copy that memory to
    // stack.
    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
    if (osr_data == nullptr) {
      return nullptr;
    }
    memset(osr_data, 0, sizeof(OsrData) + frame_size);
    osr_data->frame_size = frame_size;

    // Art ABI: ArtMethod is at the bottom of the stack.
    osr_data->memory[0] = method;

    if (vreg_map.empty()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = vregs[vreg];
        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
  }
  return osr_data;
}

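// Entry point from the interpreter on a back edge: tries to transfer execution
// of the current frame to OSR-compiled code. Returns true if the OSR code ran
// (with its outcome stored in `result`), false to continue interpreting.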
bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Before allowing the jump, make sure no code is actively inspecting the method to avoid
  // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
  // disable OSR when single stepping, but that's currently hard to know at this point.
  // Currently, HaveLocalsChanged is not frame specific. It is possible to make it frame specific
  // to allow OSR of frames that don't have any locals changed but it isn't worth the additional
  // complexity.
  if (Runtime::Current()->GetInstrumentation()->NeedsSlowInterpreterForMethod(thread, method) ||
      Runtime::Current()->GetRuntimeCallbacks()->HaveLocalsChanged()) {
    return false;
  }

  ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
  OsrData* osr_data = jit->PrepareForOsr(method,
                                         dex_pc + dex_pc_offset,
                                         shadow_frame->GetVRegArgs(0));

  if (osr_data == nullptr) {
    return false;
  }

  {
    thread->PopShadowFrame();
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(osr_data->memory,
                          osr_data->frame_size,
                          osr_data->native_pc,
                          result,
                          method->GetShorty(),
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(osr_data);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

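// Called in the zygote once boot classpath compilation is done: copies the
// boot image methods section into the shared memfd, seals it against writes,
// re-verifies the contents, and remaps the shared pages over the zygote's own
// copy so that the zygote and its children share the memory.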
void Jit::NotifyZygoteCompilationDone() {
  if (fd_methods_ == -1) {
    return;
  }

  size_t offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
      offset += capacity;
    }
  }

  // Do an msync to ensure we are not affected by writes still being in caches.
  if (msync(zygote_mapping_methods_.Begin(), fd_methods_size_, MS_SYNC) != 0) {
    PLOG(WARNING) << "Failed to sync boot image methods memory";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // We don't need the shared mapping anymore, and we need to drop it in case
  // the file hasn't been sealed writable.
  zygote_mapping_methods_ = MemMap::Invalid();

  // Seal writes now. Zygote and children will map the memory private in order
  // to write to it.
  if (fcntl(fd_methods_, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_WRITE) == -1) {
    PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // Ensure the contents are the same as before: there was a window between
  // the memcpy and the sealing where other processes could have changed the
  // contents.
  // Note this would not be needed if we could have used F_SEAL_FUTURE_WRITE,
  // see b/143833776.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
        LOG(WARNING) << "Contents differ in boot image methods data";
        code_cache_->GetZygoteMap()->SetCompilationState(
            ZygoteCompilationState::kNotifiedFailure);
        return;
      }
      offset += capacity;
    }
  }

  // Future spawned processes don't need the fd anymore.
  fd_methods_.reset();

  // In order to have the zygote and children share the memory, we also remap
  // the memory into the zygote process.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (mremap(child_mapping_methods.Begin() + offset,
                 capacity,
                 capacity,
                 MREMAP_FIXED | MREMAP_MAYMOVE,
                 page_start) == MAP_FAILED) {
        // Failing to remap is safe as the process will just use the old
        // contents.
        PLOG(WARNING) << "Failed mremap of boot image methods of " << space->GetImageFilename();
      }
      offset += capacity;
    }
  }

  LOG(INFO) << "Successfully notified child processes on sharing boot image methods";

  // Mark that compilation of boot classpath is done, and memory can now be
  // shared. Other processes will pick up this information.
  code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedOk);

  // The private mapping created for this process has been mremaped. We can
  // reset it.
  child_mapping_methods.Reset();
}

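// RAII helper that registers `method` as being compiled (for the given kind)
// in the code cache for the lifetime of the object. If the pair is already
// being compiled, or the thread pool is unavailable, the object does not take
// ownership and callers are expected to drop the compilation request.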
class ScopedCompilation {
 public:
  ScopedCompilation(ScopedCompilation&& other) noexcept :
      jit_(other.jit_),
      method_(other.method_),
      compilation_kind_(other.compilation_kind_),
      owns_compilation_(other.owns_compilation_) {
    other.owns_compilation_ = false;
  }

  ScopedCompilation(Jit* jit, ArtMethod* method, CompilationKind compilation_kind)
      : jit_(jit),
        method_(method),
        compilation_kind_(compilation_kind),
        owns_compilation_(true) {
    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
    // We don't want to enqueue any new tasks when thread pool has stopped. This simplifies
    // the implementation of redefinition feature in jvmti.
    if (jit_->GetThreadPool() == nullptr ||
        !jit_->GetThreadPool()->HasStarted(Thread::Current()) ||
        jit_->GetCodeCache()->IsMethodBeingCompiled(method_, compilation_kind_)) {
      owns_compilation_ = false;
      return;
    }
    jit_->GetCodeCache()->AddMethodBeingCompiled(method_, compilation_kind_);
  }

  bool OwnsCompilation() const {
    return owns_compilation_;
  }

  ~ScopedCompilation() {
    if (owns_compilation_) {
      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
      jit_->GetCodeCache()->RemoveMethodBeingCompiled(method_, compilation_kind_);
    }
  }

 private:
  Jit* const jit_;
  ArtMethod* const method_;
  const CompilationKind compilation_kind_;
  bool owns_compilation_;
};

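// Thread-pool task that runs a single JIT compilation and deletes itself once
// processed. The ScopedCompilation it holds keeps the (method, kind) pair
// marked as being compiled until the task is destroyed.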
class JitCompileTask final : public Task {
 public:
  enum class TaskKind {
    kCompile,
    kPreCompile,
  };

  JitCompileTask(ArtMethod* method,
                 TaskKind task_kind,
                 CompilationKind compilation_kind,
                 ScopedCompilation&& sc)
      : method_(method),
        kind_(task_kind),
        compilation_kind_(compilation_kind),
        scoped_compilation_(std::move(sc)) {
    DCHECK(scoped_compilation_.OwnsCompilation());
    DCHECK(!sc.OwnsCompilation());
  }

  void Run(Thread* self) override {
    {
      ScopedObjectAccess soa(self);
      switch (kind_) {
        case TaskKind::kCompile:
        case TaskKind::kPreCompile: {
          Runtime::Current()->GetJit()->CompileMethodInternal(
              method_,
              self,
              compilation_kind_,
              /* prejit= */ (kind_ == TaskKind::kPreCompile));
          break;
        }
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() override {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  const CompilationKind compilation_kind_;
  ScopedCompilation scoped_compilation_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

static std::string GetProfileFile(const std::string& dex_location) {
  // Hardcoded assumption where the profile file is.
  // TODO(ngeoffray): this is brittle and we would need to change this if we
  // wanted to do more eager JITting of methods in a profile. This is
  // currently only for system server.
  return dex_location + ".prof";
}

static std::string GetBootProfileFile(const std::string& profile) {
  // The boot profile can be found next to the compilation profile, with a
  // different extension.
  return ReplaceFileExtension(profile, "bprof");
}

/**
 * A JIT task to run after all profile compilation is done.
 */
class JitDoneCompilingProfileTask final : public SelfDeletingTask {
 public:
  explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
      : dex_files_(dex_files) {}

  void Run(Thread* self ATTRIBUTE_UNUSED) override {
    // Madvise DONTNEED dex files now that we're done compiling methods.
    for (const DexFile* dex_file : dex_files_) {
      if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
        int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), kPageSize)),
                             RoundUp(dex_file->Size(), kPageSize),
                             MADV_DONTNEED);
        if (result == -1) {
          PLOG(WARNING) << "Madvise failed";
        }
      }
    }
  }

 private:
  std::vector<const DexFile*> dex_files_;

  DISALLOW_COPY_AND_ASSIGN(JitDoneCompilingProfileTask);
};

class JitZygoteDoneCompilingTask final : public SelfDeletingTask {
 public:
  JitZygoteDoneCompilingTask() {}

  void Run(Thread* self ATTRIBUTE_UNUSED) override {
    DCHECK(Runtime::Current()->IsZygote());
    Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
        ZygoteCompilationState::kDone);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(JitZygoteDoneCompilingTask);
};

/**
 * A JIT task to run Java verification of boot classpath classes that were not
 * verified at compile-time.
 */
class ZygoteVerificationTask final : public Task {
 public:
  ZygoteVerificationTask() {}

  void Run(Thread* self) override {
    // We are going to load the class and run verification, which may also need
    // to load classes. If the thread cannot load classes (typically when the
    // runtime is debuggable), then just return.
    if (!self->CanLoadClasses()) {
      return;
    }
    Runtime* runtime = Runtime::Current();
    ClassLinker* linker = runtime->GetClassLinker();
    const std::vector<const DexFile*>& boot_class_path =
        runtime->GetClassLinker()->GetBootClassPath();
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
    uint64_t start_ns = ThreadCpuNanoTime();
    uint64_t number_of_classes = 0;
    for (const DexFile* dex_file : boot_class_path) {
      for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
        const dex::ClassDef& class_def = dex_file->GetClassDef(i);
        const char* descriptor = dex_file->GetClassDescriptor(class_def);
        klass.Assign(linker->LookupResolvedType(descriptor, /* class_loader= */ nullptr));
        if (klass == nullptr) {
          // Class not loaded yet.
          DCHECK(!self->IsExceptionPending());
          continue;
        }
        if (klass->IsVerified()) {
          continue;
        }
        if (linker->VerifyClass(self, /* verifier_deps= */ nullptr, klass) ==
                verifier::FailureKind::kHardFailure) {
          CHECK(self->IsExceptionPending());
          LOG(WARNING) << "Methods in the boot classpath failed to verify: "
                       << self->GetException()->Dump();
          self->ClearException();
        } else {
          ++number_of_classes;
        }
        CHECK(!self->IsExceptionPending());
      }
    }
    LOG(INFO) << "Background verification of "
              << number_of_classes
              << " classes from boot classpath took "
              << PrettyDuration(ThreadCpuNanoTime() - start_ns);
  }
};

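/**
 * A JIT task, run in the zygote, that pre-compiles methods recorded in the
 * boot image profiles so that the generated code can be shared with child
 * processes.
 */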
class ZygoteTask final : public Task {
 public:
  ZygoteTask() {}

  void Run(Thread* self) override {
    Runtime* runtime = Runtime::Current();
    uint32_t added_to_queue = 0;
    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
      const std::vector<const DexFile*>& boot_class_path =
          runtime->GetClassLinker()->GetBootClassPath();
      ScopedNullHandle<mirror::ClassLoader> null_handle;
      // We avoid doing compilation at boot for the secondary zygote, as apps forked from it are not
      // critical for boot.
      if (Runtime::Current()->IsPrimaryZygote()) {
        for (const std::string& profile_file : space->GetProfileFiles()) {
          std::string boot_profile = GetBootProfileFile(profile_file);
          LOG(INFO) << "JIT Zygote looking at boot profile " << boot_profile;

          // We add to the queue for zygote so that we can fork processes in-between compilations.
          added_to_queue += runtime->GetJit()->CompileMethodsFromBootProfile(
              self, boot_class_path, boot_profile, null_handle, /* add_to_queue= */ true);
        }
      }
      for (const std::string& profile_file : space->GetProfileFiles()) {
        LOG(INFO) << "JIT Zygote looking at profile " << profile_file;

        added_to_queue += runtime->GetJit()->CompileMethodsFromProfile(
            self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
      }
    }
    DCHECK(runtime->GetJit()->InZygoteUsingJit());
    runtime->GetJit()->AddPostBootTask(self, new JitZygoteDoneCompilingTask());

    JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
    code_cache->GetZygoteMap()->Initialize(added_to_queue);
  }

  void Finalize() override {
    delete this;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
};

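/**
 * A JIT task that pre-compiles methods recorded in an app profile; currently
 * only enqueued for system server dex files (see RegisterDexFiles).
 */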
class JitProfileTask final : public Task {
 public:
  JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                 jobject class_loader) {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader>(class_loader)));
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (const auto& dex_file : dex_files) {
      dex_files_.push_back(dex_file.get());
      // Register the dex file so that we can guarantee it doesn't get deleted
      // while reading it during the task.
      class_linker->RegisterDexFile(*dex_file.get(), h_loader.Get());
    }
    // We also create our own global ref to use this class loader later.
    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), h_loader.Get());
  }

  void Run(Thread* self) override {
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
        soa.Decode<mirror::ClassLoader>(class_loader_));

    std::string profile = GetProfileFile(dex_files_[0]->GetLocation());
    std::string boot_profile = GetBootProfileFile(profile);

    Jit* jit = Runtime::Current()->GetJit();

    jit->CompileMethodsFromBootProfile(
        self,
        dex_files_,
        boot_profile,
        loader,
        /* add_to_queue= */ false);

    jit->CompileMethodsFromProfile(
        self,
        dex_files_,
        profile,
        loader,
        /* add_to_queue= */ true);
  }

  void Finalize() override {
    delete this;
  }

  ~JitProfileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), class_loader_);
  }

 private:
  std::vector<const DexFile*> dex_files_;
  jobject class_loader_;

  DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
};

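// Copies only when the contents differ: writing to the private mapping would
// otherwise dirty (copy-on-write) a page that could have stayed shared.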
static void CopyIfDifferent(void* s1, const void* s2, size_t n) {
  if (memcmp(s1, s2, n) != 0) {
    memcpy(s1, s2, n);
  }
}

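// Called in child processes after the zygote has finished compiling: remaps
// the boot image methods section so it is backed by the memory shared from
// the zygote, preserving this process's own entrypoints and native method
// data where they differ from the zygote's.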
MapBootImageMethods()1071 void Jit::MapBootImageMethods() {
1072 if (Runtime::Current()->IsJavaDebuggable()) {
1073 LOG(INFO) << "Not mapping boot image methods due to process being debuggable";
1074 return;
1075 }
1076 CHECK_NE(fd_methods_.get(), -1);
1077 if (!code_cache_->GetZygoteMap()->CanMapBootImageMethods()) {
1078 LOG(WARNING) << "Not mapping boot image methods due to error from zygote";
1079 // We don't need the fd anymore.
1080 fd_methods_.reset();
1081 return;
1082 }
1083
1084 std::string error_str;
1085 MemMap child_mapping_methods = MemMap::MapFile(
1086 fd_methods_size_,
1087 PROT_READ | PROT_WRITE,
1088 MAP_PRIVATE,
1089 fd_methods_,
1090 /* start= */ 0,
1091 /* low_4gb= */ false,
1092 "boot-image-methods",
1093 &error_str);
1094
1095 // We don't need the fd anymore.
1096 fd_methods_.reset();
1097
1098 if (!child_mapping_methods.IsValid()) {
1099 LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
1100 return;
1101 }
1102 // We are going to mremap the child mapping into the image:
1103 //
1104 // ImageSection ChildMappingMethods
1105 //
1106 // section start --> -----------
1107 // | |
1108 // | |
1109 // page_start --> | | <----- -----------
1110 // | | | |
1111 // | | | |
1112 // | | | |
1113 // | | | |
1114 // | | | |
1115 // | | | |
1116 // | | | |
1117 // page_end --> | | <----- -----------
1118 // | |
1119 // section end --> -----------
1120 //
1121 size_t offset = 0;
1122 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
1123 const ImageHeader& header = space->GetImageHeader();
1124 const ImageSection& section = header.GetMethodsSection();
1125 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
1126 uint8_t* page_end =
1127 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
1128 if (page_end <= page_start) {
1129 // Section doesn't contain one aligned entire page.
1130 continue;
1131 }
1132 uint64_t capacity = page_end - page_start;
1133 // Walk over methods in the boot image, and check for:
1134 // 1) methods whose class is not initialized in the process, but are in the
1135 // zygote process. For such methods, we need their entrypoints to be stubs
1136 // that do the initialization check.
1137 // 2) native methods whose data pointer is different than the one in the
1138 // zygote. Such methods may have had custom native implementation provided
1139 // by JNI RegisterNatives.
1140 header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
1141 // Methods in the boot image should never have their single
1142 // implementation flag set (and therefore never have a `data_` pointing
1143 // to an ArtMethod for single implementation).
1144 CHECK(method.IsIntrinsic() || !method.HasSingleImplementationFlag());
1145 if (method.IsRuntimeMethod()) {
1146 return;
1147 }
1148
1149 // Pointer to the method we're currently using.
1150 uint8_t* pointer = reinterpret_cast<uint8_t*>(&method);
1151 // The data pointer of that method that we want to keep.
1152 uint8_t* data_pointer = pointer + ArtMethod::DataOffset(kRuntimePointerSize).Int32Value();
1153 if (method.IsNative() && data_pointer >= page_start && data_pointer < page_end) {
1154 // The data pointer of the ArtMethod in the shared memory we are going to remap into our
1155 // own mapping. This is the data that we will see after the remap.
1156 uint8_t* new_data_pointer =
1157 child_mapping_methods.Begin() + offset + (data_pointer - page_start);
1158 CopyIfDifferent(new_data_pointer, data_pointer, sizeof(void*));
1159 }
1160
1161 // The entrypoint of the method we're currently using and that we want to
1162 // keep.
1163 uint8_t* entry_point_pointer = pointer +
1164 ArtMethod::EntryPointFromQuickCompiledCodeOffset(kRuntimePointerSize).Int32Value();
1165 if (!method.GetDeclaringClassUnchecked()->IsVisiblyInitialized() &&
1166 method.IsStatic() &&
1167 !method.IsConstructor() &&
1168 entry_point_pointer >= page_start &&
1169 entry_point_pointer < page_end) {
1170 // The entry point of the ArtMethod in the shared memory we are going to remap into our
1171 // own mapping. This is the entrypoint that we will see after the remap.
1172 uint8_t* new_entry_point_pointer =
1173 child_mapping_methods.Begin() + offset + (entry_point_pointer - page_start);
1174 CopyIfDifferent(new_entry_point_pointer, entry_point_pointer, sizeof(void*));
1175 }
1176 }, space->Begin(), kRuntimePointerSize);
1177
1178 // Map the memory in the boot image range.
1179 if (mremap(child_mapping_methods.Begin() + offset,
1180 capacity,
1181 capacity,
1182 MREMAP_FIXED | MREMAP_MAYMOVE,
1183 page_start) == MAP_FAILED) {
1184 PLOG(WARNING) << "Fail to mremap boot image methods for " << space->GetImageFilename();
1185 }
1186 offset += capacity;
1187 }
1188
1189 // The private mapping created for this process has been mremaped. We can
1190 // reset it.
1191 child_mapping_methods.Reset();
1192 LOG(INFO) << "Successfully mapped boot image methods";
1193 }
1194
InZygoteUsingJit()1195 bool Jit::InZygoteUsingJit() {
1196 Runtime* runtime = Runtime::Current();
1197 return runtime->IsZygote() && runtime->HasImageWithProfile() && runtime->UseJitCompilation();
1198 }
1199
CreateThreadPool()1200 void Jit::CreateThreadPool() {
1201 // There is a DCHECK in the 'AddSamples' method to ensure the tread pool
1202 // is not null when we instrument.
1203
1204 // We need peers as we may report the JIT thread, e.g., in the debugger.
1205 constexpr bool kJitPoolNeedsPeers = true;
1206 thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
1207
1208 Runtime* runtime = Runtime::Current();
1209 thread_pool_->SetPthreadPriority(
1210 runtime->IsZygote()
1211 ? options_->GetZygoteThreadPoolPthreadPriority()
1212 : options_->GetThreadPoolPthreadPriority());
1213 Start();
1214
1215 if (runtime->IsZygote()) {
1216 // To speed up class lookups, generate a type lookup table for
1217 // dex files not backed by oat file.
1218 for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
1219 if (dex_file->GetOatDexFile() == nullptr) {
1220 TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
1221 type_lookup_tables_.push_back(
1222 std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
1223 dex_file->SetOatDexFile(type_lookup_tables_.back().get());
1224 }
1225 }
1226
1227 // Add a task that will verify boot classpath jars that were not
1228 // pre-compiled.
1229 thread_pool_->AddTask(Thread::Current(), new ZygoteVerificationTask());
1230 }
1231
1232 if (InZygoteUsingJit()) {
1233 // If we have an image with a profile, request a JIT task to
1234 // compile all methods in that profile.
1235 thread_pool_->AddTask(Thread::Current(), new ZygoteTask());
1236
1237 // And create mappings to share boot image methods memory from the zygote to
1238 // child processes.
1239
1240 // Compute the total capacity required for the boot image methods.
1241 uint64_t total_capacity = 0;
1242 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
1243 const ImageHeader& header = space->GetImageHeader();
1244 const ImageSection& section = header.GetMethodsSection();
1245 // Mappings need to be at the page level.
1246 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
1247 uint8_t* page_end =
1248 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
1249 if (page_end > page_start) {
1250 total_capacity += (page_end - page_start);
1251 }
1252 }
1253
1254 // Create the child and zygote mappings to the boot image methods.
1255 if (total_capacity > 0) {
1256 // Start with '/boot' and end with '.art' to match the pattern recognized
1257 // by android_os_Debug.cpp for boot images.
1258 const char* name = "/boot-image-methods.art";
1259 unique_fd mem_fd =
1260 unique_fd(art::memfd_create(name, /* flags= */ MFD_ALLOW_SEALING | MFD_CLOEXEC));
1261 if (mem_fd.get() == -1) {
1262 PLOG(WARNING) << "Could not create boot image methods file descriptor";
1263 return;
1264 }
1265 if (ftruncate(mem_fd.get(), total_capacity) != 0) {
1266 PLOG(WARNING) << "Failed to truncate boot image methods file to " << total_capacity;
1267 return;
1268 }
1269 std::string error_str;
1270
1271 // Create the shared mapping eagerly, as this prevents other processes
1272 // from adding the writable seal.
1273 zygote_mapping_methods_ = MemMap::MapFile(
1274 total_capacity,
1275 PROT_READ | PROT_WRITE,
1276 MAP_SHARED,
1277 mem_fd,
1278 /* start= */ 0,
1279 /* low_4gb= */ false,
1280 "boot-image-methods",
1281 &error_str);
1282
1283 if (!zygote_mapping_methods_.IsValid()) {
1284 LOG(WARNING) << "Failed to create zygote mapping of boot image methods: " << error_str;
1285 return;
1286 }
1287 if (zygote_mapping_methods_.MadviseDontFork() != 0) {
1288 LOG(WARNING) << "Failed to madvise dont fork boot image methods";
1289 zygote_mapping_methods_ = MemMap();
1290 return;
1291 }
1292
1293 // We should use the F_SEAL_FUTURE_WRITE flag, but this has unexpected
1294 // behavior on private mappings after fork (the mapping becomes shared between
1295 // parent and children), see b/143833776.
1296 // We will seal the write once we are done writing to the shared mapping.
1297 if (fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) == -1) {
1298 PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
1299 zygote_mapping_methods_ = MemMap();
1300 return;
1301 }
1302 fd_methods_ = unique_fd(mem_fd.release());
1303 fd_methods_size_ = total_capacity;
1304 }
1305 }
1306 }
1307
RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>> & dex_files,jobject class_loader)1308 void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
1309 jobject class_loader) {
1310 if (dex_files.empty()) {
1311 return;
1312 }
1313 Runtime* runtime = Runtime::Current();
1314 // If the runtime is debuggable, don't bother precompiling methods.
1315 // If system server is being profiled, don't precompile as we are going to use
1316 // the JIT to count hotness. Note that --count-hotness-in-compiled-code is
1317 // only forced when we also profile the boot classpath, see
1318 // AndroidRuntime.cpp.
1319 if (runtime->IsSystemServer() &&
1320 UseJitCompilation() &&
1321 options_->UseProfiledJitCompilation() &&
1322 runtime->HasImageWithProfile() &&
1323 !runtime->IsSystemServerProfiled() &&
1324 !runtime->IsJavaDebuggable()) {
1325 // Note: this precompilation is currently not running in production because:
1326 // - UseProfiledJitCompilation() is not set by default.
1327 // - System server dex files are registered *before* we set the runtime as
1328 // system server (though we are in the system server process).
1329 thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
1330 }
1331 }
1332
AddCompileTask(Thread * self,ArtMethod * method,CompilationKind compilation_kind,bool precompile)1333 void Jit::AddCompileTask(Thread* self,
1334 ArtMethod* method,
1335 CompilationKind compilation_kind,
1336 bool precompile) {
1337 ScopedCompilation sc(this, method, compilation_kind);
1338 if (!sc.OwnsCompilation()) {
1339 return;
1340 }
1341 JitCompileTask::TaskKind task_kind = precompile
1342 ? JitCompileTask::TaskKind::kPreCompile
1343 : JitCompileTask::TaskKind::kCompile;
1344 thread_pool_->AddTask(
1345 self, new JitCompileTask(method, task_kind, compilation_kind, std::move(sc)));
1346 }
1347
CompileMethodFromProfile(Thread * self,ClassLinker * class_linker,uint32_t method_idx,Handle<mirror::DexCache> dex_cache,Handle<mirror::ClassLoader> class_loader,bool add_to_queue,bool compile_after_boot)1348 bool Jit::CompileMethodFromProfile(Thread* self,
1349 ClassLinker* class_linker,
1350 uint32_t method_idx,
1351 Handle<mirror::DexCache> dex_cache,
1352 Handle<mirror::ClassLoader> class_loader,
1353 bool add_to_queue,
1354 bool compile_after_boot) {
1355 ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
1356 method_idx, dex_cache, class_loader);
1357 if (method == nullptr) {
1358 self->ClearException();
1359 return false;
1360 }
1361 if (!method->IsCompilable() || !method->IsInvokable()) {
1362 return false;
1363 }
1364 if (method->IsPreCompiled()) {
1365 // Already seen by another profile.
1366 return false;
1367 }
1368 CompilationKind compilation_kind = CompilationKind::kOptimized;
1369 const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
1370 if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
1371 class_linker->IsQuickGenericJniStub(entry_point) ||
1372 class_linker->IsNterpEntryPoint(entry_point) ||
1373 // We explicitly check for the resolution stub, and not the resolution trampoline.
1374 // The trampoline is for methods backed by a .oat file that has a compiled version of
1375 // the method.
1376 (entry_point == GetQuickResolutionStub())) {
1377 VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
1378 << " from profile";
1379 method->SetPreCompiled();
1380 ScopedCompilation sc(this, method, compilation_kind);
1381 if (!sc.OwnsCompilation()) {
1382 return false;
1383 }
1384 if (!add_to_queue) {
1385 CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
1386 } else {
1387 Task* task = new JitCompileTask(
1388 method, JitCompileTask::TaskKind::kPreCompile, compilation_kind, std::move(sc));
1389 if (compile_after_boot) {
1390 AddPostBootTask(self, task);
1391 } else {
1392 thread_pool_->AddTask(self, task);
1393 }
1394 return true;
1395 }
1396 }
1397 return false;
1398 }
1399
CompileMethodsFromBootProfile(Thread * self,const std::vector<const DexFile * > & dex_files,const std::string & profile_file,Handle<mirror::ClassLoader> class_loader,bool add_to_queue)1400 uint32_t Jit::CompileMethodsFromBootProfile(
1401 Thread* self,
1402 const std::vector<const DexFile*>& dex_files,
1403 const std::string& profile_file,
1404 Handle<mirror::ClassLoader> class_loader,
1405 bool add_to_queue) {
1406 unix_file::FdFile profile(profile_file, O_RDONLY, true);
1407
1408 if (profile.Fd() == -1) {
1409 PLOG(WARNING) << "No boot profile: " << profile_file;
1410 return 0u;
1411 }
1412
1413 ProfileBootInfo profile_info;
1414 if (!profile_info.Load(profile.Fd(), dex_files)) {
1415 LOG(ERROR) << "Could not load profile file: " << profile_file;
1416 return 0u;
1417 }
1418
1419 ScopedObjectAccess soa(self);
1420 VariableSizedHandleScope handles(self);
1421 std::vector<Handle<mirror::DexCache>> dex_caches;
1422 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1423 for (const DexFile* dex_file : profile_info.GetDexFiles()) {
1424 dex_caches.push_back(handles.NewHandle(class_linker->FindDexCache(self, *dex_file)));
1425 }
1426
1427 uint32_t added_to_queue = 0;
1428 for (const std::pair<uint32_t, uint32_t>& pair : profile_info.GetMethods()) {
1429 if (CompileMethodFromProfile(self,
1430 class_linker,
1431 pair.second,
1432 dex_caches[pair.first],
1433 class_loader,
1434 add_to_queue,
1435 /*compile_after_boot=*/false)) {
1436 ++added_to_queue;
1437 }
1438 }
1439 return added_to_queue;
1440 }
1441
uint32_t Jit::CompileMethodsFromProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {
  if (profile_file.empty()) {
    LOG(WARNING) << "Expected a profile file in JIT zygote mode";
    return 0u;
  }

  // We don't generate boot profiles on device, therefore we don't
  // need to lock the file.
  unix_file::FdFile profile(profile_file, O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No profile: " << profile_file;
    return 0u;
  }

  ProfileCompilationInfo profile_info(/* for_boot_image= */ class_loader.IsNull());
  if (!profile_info.Load(profile.Fd())) {
    LOG(ERROR) << "Could not load profile file";
    return 0u;
  }
  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  uint32_t added_to_queue = 0u;
  for (const DexFile* dex_file : dex_files) {
    std::set<dex::TypeIndex> class_types;
    std::set<uint16_t> all_methods;
    if (!profile_info.GetClassesAndMethods(*dex_file,
                                           &class_types,
                                           &all_methods,
                                           &all_methods,
                                           &all_methods)) {
      // This means the profile file did not reference the dex file, which is the case
      // if there are no classes or methods of that dex file in the profile.
      continue;
    }
    dex_cache.Assign(class_linker->FindDexCache(self, *dex_file));
    CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();

    for (uint16_t method_idx : all_methods) {
      if (CompileMethodFromProfile(self,
                                   class_linker,
                                   method_idx,
                                   dex_cache,
                                   class_loader,
                                   add_to_queue,
                                   /*compile_after_boot=*/true)) {
        ++added_to_queue;
      }
    }
  }

  // Add a task to run when all compilation is done.
  AddPostBootTask(self, new JitDoneCompilingProfileTask(dex_files));
  return added_to_queue;
}
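
// Returns true for methods whose hotness samples should be ignored. Class
// initializers run only once per class, and the MethodHandle/VarHandle
// invocation natives below must never be JIT-compiled (see b/78151261), so
// sampling them would only waste compilation effort.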
bool Jit::IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsClassInitializer() || !method->IsCompilable()) {
    // We do not want to compile such methods.
    return true;
  }
  if (method->IsNative()) {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
    if (klass == GetClassRoot<mirror::MethodHandle>() ||
        klass == GetClassRoot<mirror::VarHandle>()) {
      // MethodHandle and VarHandle invocation methods are required to throw an
      // UnsupportedOperationException if invoked reflectively. We achieve this by having native
      // implementations that raise the exception. We need to disable JIT compilation of these JNI
      // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
      // stubs. Since these stubs have different stack representations we can then crash in stack
      // walking (b/78151261).
      return true;
    }
  }
  return false;
}
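
// Called from baseline compiled code once it has reached its hotness
// threshold: reset the counter and, unless the JIT is restricted to the
// baseline compiler, tier up by queueing an optimized compilation.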
void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
  // Reset the hotness counter so the baseline compiled code doesn't call this
  // method repeatedly.
  GetCodeCache()->ResetHotnessCounter(method, self);

  if (thread_pool_ == nullptr) {
    return;
  }
  // We arrive here after baseline compiled code has reached its baseline
  // hotness threshold. If we're not restricted to the baseline compiler,
  // enqueue a task that will compile the method with the optimizing compiler.
  if (!options_->UseBaselineCompiler()) {
    AddCompileTask(self, method, CompilationKind::kOptimized);
  }
}
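
// RAII helper that marks the current thread as a runtime thread for the
// duration of a scope and restores the previous state on destruction, so that
// class-load behavior matches what happens on a JIT worker thread.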
class ScopedSetRuntimeThread {
 public:
  explicit ScopedSetRuntimeThread(Thread* self)
      : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
    self_->SetIsRuntimeThread(true);
  }

  ~ScopedSetRuntimeThread() {
    self_->SetIsRuntimeThread(was_runtime_thread_);
  }

 private:
  Thread* self_;
  bool was_runtime_thread_;
};

void Jit::MethodEntered(Thread* self, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
    ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    if (np_method->IsCompilable()) {
      CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
    }
    return;
  }

  AddSamples(self, method);
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}
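
// Pauses JIT compilation for the lifetime of the object: worker threads are
// stopped in the constructor and restarted in the destructor, but only if the
// JIT was actually running when the scope was entered.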
ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}
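
// Entry point of the detached thread created in PostForkChildAction. It polls
// until the zygote signals that its boot image compilation is done, then
// attaches to the runtime and remaps the boot image methods while all other
// threads are suspended.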
static void* RunPollingThread(void* arg) {
  Jit* jit = reinterpret_cast<Jit*>(arg);
  do {
    sleep(10);
  } while (!jit->GetCodeCache()->GetZygoteMap()->IsCompilationNotified());

  // We will suspend other threads: we can only do that if we're attached to the
  // runtime.
  Runtime* runtime = Runtime::Current();
  bool thread_attached = runtime->AttachCurrentThread(
      "BootImagePollingThread",
      /* as_daemon= */ true,
      /* thread_group= */ nullptr,
      /* create_peer= */ false);
  CHECK(thread_attached);

  {
    // Prevent other threads from running while we are remapping the boot image
    // ArtMethods. Native threads might still be running, but they cannot
    // change the contents of ArtMethods.
    ScopedSuspendAll ssa(__FUNCTION__);
    runtime->GetJit()->MapBootImageMethods();
  }

  Runtime::Current()->DetachCurrentThread();
  return nullptr;
}
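
// Runs in the child process right after it forks from the zygote. Depending on
// the process configuration this either tears the JIT thread pool down (child
// zygotes, safe mode) or re-parses compiler options and reconfigures code
// cache collection for the forked child.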
void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
  // Clear the potential boot tasks inherited from the zygote.
  {
    MutexLock mu(Thread::Current(), boot_completed_lock_);
    tasks_after_boot_.clear();
  }

  Runtime* const runtime = Runtime::Current();
  // Check if we'll need to remap the boot image methods.
  if (!is_zygote && fd_methods_ != -1) {
    // Create a thread that will poll the status of zygote compilation, and map
    // the private mapping of boot image methods.
    // For a child zygote, we instead query IsCompilationNotified() after the
    // zygote fork (see PostZygoteFork).
    zygote_mapping_methods_.ResetInForkedProcess();
    pthread_t polling_thread;
    pthread_attr_t attr;
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(
        pthread_create,
        (&polling_thread, &attr, RunPollingThread, reinterpret_cast<void*>(this)),
        "Methods maps thread");
  }

  if (is_zygote || runtime->IsSafeMode()) {
    // Delete the thread pool, we are not going to JIT.
    thread_pool_.reset(nullptr);
    return;
  }
  // At this point, the compiler options have been adjusted to the particular configuration
  // of the forked child. Parse them again.
  jit_compiler_->ParseCompilerOptions();

  // Adjust the status of code cache collection: the status from zygote was to not collect.
  // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should
  // work in theory, it is causing deadlocks in some jvmti tests related to JIT GC. Hence,
  // JIT GC is disabled for now (b/147208992).
  code_cache_->SetGarbageCollectCode(
      !jit_compiler_->GenerateDebugInfo() &&
      !JitAtFirstUse());

  if (is_system_server && runtime->HasImageWithProfile()) {
    // Disable garbage collection: we don't want it to delete methods we're compiling
    // through boot and system server profiles.
    // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
    code_cache_->SetGarbageCollectCode(false);
  }

  // We do this here instead of PostZygoteFork, as NativeDebugInfoPostFork only
  // applies to a child.
  NativeDebugInfoPostFork();
}

void Jit::PreZygoteFork() {
  if (thread_pool_ == nullptr) {
    return;
  }
  thread_pool_->DeleteThreads();

  NativeDebugInfoPreFork();
}

void Jit::PostZygoteFork() {
  Runtime* runtime = Runtime::Current();
  if (thread_pool_ == nullptr) {
    // If this is a child zygote, check if we need to remap the boot image
    // methods.
    if (runtime->IsZygote() &&
        fd_methods_ != -1 &&
        code_cache_->GetZygoteMap()->IsCompilationNotified()) {
      ScopedSuspendAll ssa(__FUNCTION__);
      MapBootImageMethods();
    }
    return;
  }
  if (runtime->IsZygote() && code_cache_->GetZygoteMap()->IsCompilationDoneButNotNotified()) {
    // Copy the boot image methods data to the mappings we created to share
    // with the children. We do this here as we are the only thread running and
    // we don't risk other threads concurrently updating the ArtMethods.
    CHECK_EQ(GetTaskCount(), 1);
    NotifyZygoteCompilationDone();
    CHECK(code_cache_->GetZygoteMap()->IsCompilationNotified());
  }
  thread_pool_->CreateThreads();
  thread_pool_->SetPthreadPriority(
      runtime->IsZygote()
          ? options_->GetZygoteThreadPoolPthreadPriority()
          : options_->GetThreadPoolPthreadPriority());
}
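
// Defers a task until boot completes; if boot has already completed, the task
// is submitted to the thread pool right away.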
void Jit::AddPostBootTask(Thread* self, Task* task) {
  MutexLock mu(self, boot_completed_lock_);
  if (boot_completed_) {
    thread_pool_->AddTask(self, task);
  } else {
    tasks_after_boot_.push_back(task);
  }
}
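
// Flushes the tasks that were deferred until boot completion. The queue is
// moved out while holding the lock so that the tasks themselves are submitted
// without holding boot_completed_lock_.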
void Jit::BootCompleted() {
  Thread* self = Thread::Current();
  std::deque<Task*> tasks;
  {
    MutexLock mu(self, boot_completed_lock_);
    tasks = std::move(tasks_after_boot_);
    boot_completed_ = true;
  }
  for (Task* task : tasks) {
    thread_pool_->AddTask(self, task);
  }
}
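
// JIT-compiled code in the shared region may be executed by several processes,
// so it may only embed references to objects that are guaranteed to live at
// the same address everywhere, i.e. objects in the boot image space.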
bool Jit::CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const {
  return !is_for_shared_region ||
         Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(method->GetDeclaringClass());
}

bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls);
}

bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string);
}

bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  if (!is_for_shared_region) {
    return cls->IsInitialized();
  } else {
    // Look up the class status in the oat file.
    const DexFile& dex_file = *cls->GetDexCache()->GetDexFile();
    const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
    // In case we run without an image there won't be a backing oat file.
    if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
      return false;
    }
    uint16_t class_def_index = cls->GetDexClassDefIndex();
    return oat_dex_file->GetOatClass(class_def_index).GetStatus() >= ClassStatus::kInitialized;
  }
}
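
// Central tiering decision, called when a method's hotness counter triggers.
// In order: bail out when the JIT is off or the method's samples are ignored;
// request an OSR compilation when compiled code already exists but nterp may
// be stuck in a loop; restore the saved entry point of precompiled methods;
// throttle memory-shared methods with a per-process countdown; and otherwise
// enqueue a baseline compilation when profiling info can be allocated, or an
// optimized one when it cannot.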
void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
  if (thread_pool_ == nullptr) {
    return;
  }

  if (JitAtFirstUse()) {
    // Tests might request JIT on first use (compiled synchronously in the interpreter).
    return;
  }

  if (!UseJitCompilation()) {
    return;
  }

  if (IgnoreSamplesForMethod(method)) {
    return;
  }

  if (GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
      // If we already have compiled code for it, nterp may be stuck in a loop.
      // Compile OSR.
      AddCompileTask(self, method, CompilationKind::kOsr);
    }
    return;
  }

  // Check if we have precompiled this method.
  if (UNLIKELY(method->IsPreCompiled())) {
    if (!method->StillNeedsClinitCheck()) {
      const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
      if (entry_point != nullptr) {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
      }
    }
    return;
  }
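
  // For methods whose ArtMethod lives in memory shared across processes, track
  // hotness in a process-local map instead: only every
  // (kIndividualSharedMethodHotnessThreshold + 1)-th notification falls
  // through to enqueue a compilation.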
  static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0x3f;
  if (method->IsMemorySharedMethod()) {
    MutexLock mu(self, lock_);
    auto it = shared_method_counters_.find(method);
    if (it == shared_method_counters_.end()) {
      shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
      return;
    } else if (it->second != 0) {
      DCHECK_LE(it->second, kIndividualSharedMethodHotnessThreshold);
      shared_method_counters_[method] = it->second - 1;
      return;
    } else {
      shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
    }
  }
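
  // Native methods have no profiling info, and without one the baseline tier
  // has nothing to collect, so such methods go straight to optimized code.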
  if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
    AddCompileTask(self, method, CompilationKind::kBaseline);
  } else {
    AddCompileTask(self, method, CompilationKind::kOptimized);
  }
}

bool Jit::CompileMethod(ArtMethod* method,
                        Thread* self,
                        CompilationKind compilation_kind,
                        bool prejit) {
  ScopedCompilation sc(this, method, compilation_kind);
  // TODO: all current users of this method expect us to wait if it is being compiled.
  if (!sc.OwnsCompilation()) {
    return false;
  }
  // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
  ScopedSetRuntimeThread ssrt(self);
  // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
  // conflicts with jitzygote optimizations.
  return CompileMethodInternal(method, self, compilation_kind, prejit);
}

}  // namespace jit
}  // namespace art