/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_H_
#define ART_RUNTIME_JIT_JIT_H_

#include <unordered_set>

#include <android-base/unique_fd.h>

#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "compilation_kind.h"
#include "handle.h"
#include "offsets.h"
#include "interpreter/mterp/nterp.h"
#include "jit/debugger_interface.h"
#include "jit_options.h"
#include "obj_ptr.h"
#include "thread_pool.h"

namespace art HIDDEN {

class ArtMethod;
class ClassLinker;
class DexFile;
class OatDexFile;
class RootVisitor;
struct RuntimeArgumentMap;
union JValue;

namespace mirror {
class Object;
class Class;
class ClassLoader;
class DexCache;
class String;
}  // namespace mirror

namespace jit {

class JitCodeCache;
class JitCompileTask;
class JitMemoryRegion;
class JitOptions;

static constexpr int16_t kJitCheckForOSR = -1;
static constexpr int16_t kJitHotnessDisabled = -2;

// Implemented and provided by the compiler library.
class JitCompilerInterface {
 public:
  virtual ~JitCompilerInterface() {}
  virtual bool CompileMethod(
      Thread* self, JitMemoryRegion* region, ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void TypesLoaded(mirror::Class**, size_t count)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual bool GenerateDebugInfo() = 0;
  virtual void ParseCompilerOptions() = 0;
  virtual bool IsBaselineCompiler() const = 0;
  virtual void SetDebuggableCompilerOption(bool value) = 0;
  virtual uint32_t GetInlineMaxCodeUnits() const = 0;

  virtual std::vector<uint8_t> PackElfFileForJIT(ArrayRef<const JITCodeEntry*> elf_files,
                                                 ArrayRef<const void*> removed_symbols,
                                                 bool compress,
                                                 /*out*/ size_t* num_symbols) = 0;
};

// Data structure holding information to perform an OSR.
struct OsrData {
  // The native PC to jump to.
  const uint8_t* native_pc;

  // The frame size of the compiled code to jump to.
  size_t frame_size;

  // The dynamically allocated memory of size `frame_size` to copy to stack.
  void* memory[0];

  static constexpr MemberOffset NativePcOffset() {
    return MemberOffset(OFFSETOF_MEMBER(OsrData, native_pc));
  }

  static constexpr MemberOffset FrameSizeOffset() {
    return MemberOffset(OFFSETOF_MEMBER(OsrData, frame_size));
  }

  static constexpr MemberOffset MemoryOffset() {
    return MemberOffset(OFFSETOF_MEMBER(OsrData, memory));
  }
};
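
// Illustrative sketch (not part of this header; names other than OsrData are
// assumptions): because `memory` is a flexible array member, an OsrData and the
// frame contents it carries are meant to live in one contiguous allocation,
// roughly
//
//   OsrData* osr_data = reinterpret_cast<OsrData*>(
//       allocator->Alloc(sizeof(OsrData) + frame_size));   // hypothetical allocator
//   osr_data->frame_size = frame_size;
//   // ... fill osr_data->memory with the vreg values, then copy `frame_size`
//   // bytes onto the stack and jump to osr_data->native_pc.
//
// The MemberOffset accessors above presumably exist so that stubs and generated
// code can locate these fields without depending on the C++ layout directly.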

/**
 * A customized thread pool for the JIT, to prioritize compilation kinds, and
 * simplify root visiting.
 */
class JitThreadPool : public AbstractThreadPool {
 public:
  static JitThreadPool* Create(const char* name,
                               size_t num_threads,
                               size_t worker_stack_size = ThreadPoolWorker::kDefaultStackSize) {
    JitThreadPool* pool = new JitThreadPool(name, num_threads, worker_stack_size);
    pool->CreateThreads();
    return pool;
  }

  // Add a task to the generic queue. This is for tasks like
  // ZygoteVerificationTask, or JitCompileTask for precompile.
  void AddTask(Thread* self, Task* task) REQUIRES(!task_queue_lock_) override;
  size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_) override;
  void RemoveAllTasks(Thread* self) REQUIRES(!task_queue_lock_) override;
  ~JitThreadPool() override;

  // Remove the task from the list of compiling tasks.
  void Remove(JitCompileTask* task) REQUIRES(!task_queue_lock_);

  // Add a custom compilation task in the right queue.
  void AddTask(Thread* self, ArtMethod* method, CompilationKind kind) REQUIRES(!task_queue_lock_);

  // Visit the ArtMethods stored in the various queues.
  void VisitRoots(RootVisitor* visitor);

 protected:
  Task* TryGetTaskLocked() REQUIRES(task_queue_lock_) override;

  bool HasOutstandingTasks() const REQUIRES(task_queue_lock_) override {
    return started_ &&
        (!generic_queue_.empty() ||
         !baseline_queue_.empty() ||
         !optimized_queue_.empty() ||
         !osr_queue_.empty());
  }

 private:
  JitThreadPool(const char* name,
                size_t num_threads,
                size_t worker_stack_size)
      // We need peers as we may report the JIT thread, e.g., in the debugger.
      : AbstractThreadPool(name, num_threads, /* create_peers= */ true, worker_stack_size) {}

  // Try to fetch an entry from `methods`. Return null if `methods` is empty.
  Task* FetchFrom(std::deque<ArtMethod*>& methods, CompilationKind kind) REQUIRES(task_queue_lock_);

  std::deque<Task*> generic_queue_ GUARDED_BY(task_queue_lock_);

  std::deque<ArtMethod*> osr_queue_ GUARDED_BY(task_queue_lock_);
  std::deque<ArtMethod*> baseline_queue_ GUARDED_BY(task_queue_lock_);
  std::deque<ArtMethod*> optimized_queue_ GUARDED_BY(task_queue_lock_);

  // We track the methods that are currently enqueued to avoid
  // adding them to the queue multiple times, which could bloat the
  // queues.
  std::set<ArtMethod*> osr_enqueued_methods_ GUARDED_BY(task_queue_lock_);
  std::set<ArtMethod*> baseline_enqueued_methods_ GUARDED_BY(task_queue_lock_);
  std::set<ArtMethod*> optimized_enqueued_methods_ GUARDED_BY(task_queue_lock_);

  // A set to keep track of methods that are currently being compiled. Entries
  // will be removed when JitCompileTask->Finalize is called.
  std::unordered_set<JitCompileTask*> current_compilations_ GUARDED_BY(task_queue_lock_);

  DISALLOW_COPY_AND_ASSIGN(JitThreadPool);
};
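
// Illustrative sketch (assumed usage, not part of this header): code that has
// decided a method deserves a given tier hands it to the pool by kind, e.g.
//
//   JitThreadPool* pool = jit->GetThreadPool();
//   pool->AddTask(self, method, CompilationKind::kBaseline);
//
// Each kind has its own deque plus a matching *_enqueued_methods_ set, so a
// repeated AddTask for a method already queued for that kind can be ignored
// instead of growing the queue; workers later pull methods back out through
// TryGetTaskLocked()/FetchFrom().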

class Jit {
 public:
  // How frequently should the interpreter check to see if OSR compilation is ready.
  static constexpr int16_t kJitRecheckOSRThreshold = 101;  // Prime number to avoid patterns.

  virtual ~Jit();

  // Create JIT itself.
  static std::unique_ptr<Jit> Create(JitCodeCache* code_cache, JitOptions* options);

  EXPORT bool CompileMethod(ArtMethod* method,
                            Thread* self,
                            CompilationKind compilation_kind,
                            bool prejit) REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitRoots(RootVisitor* visitor);

  const JitCodeCache* GetCodeCache() const {
    return code_cache_;
  }

  JitCodeCache* GetCodeCache() {
    return code_cache_;
  }

  JitCompilerInterface* GetJitCompiler() const {
    return jit_compiler_;
  }

  void CreateThreadPool();
  void DeleteThreadPool();
  void WaitForWorkersToBeCreated();

  // Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
  // loggers.
  void DumpInfo(std::ostream& os) REQUIRES(!lock_);
  // Add a timing logger to cumulative_timings_.
  void AddTimingLogger(const TimingLogger& logger);

  void AddMemoryUsage(ArtMethod* method, size_t bytes)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  int GetThreadPoolPthreadPriority() const {
    return options_->GetThreadPoolPthreadPriority();
  }

  int GetZygoteThreadPoolPthreadPriority() const {
    return options_->GetZygoteThreadPoolPthreadPriority();
  }

  uint16_t HotMethodThreshold() const {
    return options_->GetOptimizeThreshold();
  }

  uint16_t WarmMethodThreshold() const {
    return options_->GetWarmupThreshold();
  }

  uint16_t PriorityThreadWeight() const {
    return options_->GetPriorityThreadWeight();
  }

  // Return whether we should do JIT compilation. Note this will return false
  // if we only need to save profile information and not compile methods.
  bool UseJitCompilation() const {
    return options_->UseJitCompilation();
  }

  bool GetSaveProfilingInfo() const {
    return options_->GetSaveProfilingInfo();
  }

  // Wait until there are no more pending compilation tasks.
  EXPORT void WaitForCompilationToFinish(Thread* self);

  // Profiling methods.
  void MethodEntered(Thread* thread, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void AddSamples(Thread* self, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    AddSamples(self, caller);
  }

  void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    AddSamples(self, callee);
  }
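
  // Illustrative sketch (an assumption about how the knobs above fit together;
  // the real policy lives in jit.cc and in ArtMethod's hotness counter):
  // interpreter entries and code transitions feed AddSamples(), samples from a
  // priority thread may be weighted by PriorityThreadWeight(), and a method
  // that crosses WarmMethodThreshold() or HotMethodThreshold() becomes a
  // candidate for baseline or optimized compilation respectively, roughly
  //
  //   count += is_priority_thread ? PriorityThreadWeight() : 1;
  //   if (count >= HotMethodThreshold()) {
  //     MaybeEnqueueCompilation(method, self);
  //   }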

  // Starts the profile saver if the config options allow profile recording.
  // The profile will be stored in the specified `profile_filename` and will contain
  // information collected from the given `code_paths` (a set of dex locations).
  //
  // The `ref_profile_filename` denotes the path to the reference profile which
  // might be queried to determine if an initial save should be done earlier.
  // It can be empty, indicating there is no reference profile.
  void StartProfileSaver(const std::string& profile_filename,
                         const std::vector<std::string>& code_paths,
                         const std::string& ref_profile_filename);
  void StopProfileSaver();

  void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);

  static void NewTypeLoadedIfUsingJit(mirror::Class* type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If debug info generation is turned on, then write the type information for
  // types already loaded into the specified class linker to the JIT debug interface.
  void DumpTypeInfoForLoadedTypes(ClassLinker* linker);

  // Return whether we should try to JIT-compile code as soon as an ArtMethod is invoked.
  EXPORT bool JitAtFirstUse();

  // Return whether we can invoke JIT code for `method`.
  bool CanInvokeCompiledCode(ArtMethod* method);

  // Return the information required to do an OSR jump. Return null if the OSR
  // cannot be done.
  OsrData* PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If an OSR compiled version is available for `method`,
  // and `dex_pc + dex_pc_offset` is an entry point of that compiled
  // version, this method will jump to the compiled code, let it run,
  // and return true afterwards. Return false otherwise.
  static bool MaybeDoOnStackReplacement(Thread* thread,
                                        ArtMethod* method,
                                        uint32_t dex_pc,
                                        int32_t dex_pc_offset,
                                        JValue* result)
      REQUIRES_SHARED(Locks::mutator_lock_);

  JitThreadPool* GetThreadPool() const {
    return thread_pool_.get();
  }

  // Stop the JIT by waiting for all current compilations and enqueued compilations to finish.
  EXPORT void Stop();

  // Start JIT threads.
  EXPORT void Start();

  // Transition to a child state.
  EXPORT void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Prepare for forking.
  EXPORT void PreZygoteFork();

  // Adjust state after forking.
  void PostZygoteFork();

  // Add a task to the queue, ensuring it runs after boot is finished.
  void AddPostBootTask(Thread* self, Task* task);

  // Called when the system finishes booting.
  void BootCompleted();

  // Are we in a zygote using JIT compilation?
  static bool InZygoteUsingJit();

  // Compile methods from the given profile (.prof extension). If `add_to_queue`
  // is true, methods in the profile are added to the JIT queue. Otherwise they are compiled
  // directly.
  // Return the number of methods added to the queue.
  uint32_t CompileMethodsFromProfile(Thread* self,
                                     const std::vector<const DexFile*>& dex_files,
                                     const std::string& profile_path,
                                     Handle<mirror::ClassLoader> class_loader,
                                     bool add_to_queue);

  // Compile methods from the given boot profile (.bprof extension). If `add_to_queue`
  // is true, methods in the profile are added to the JIT queue. Otherwise they are compiled
  // directly.
  // Return the number of methods added to the queue.
  uint32_t CompileMethodsFromBootProfile(Thread* self,
                                         const std::vector<const DexFile*>& dex_files,
                                         const std::string& profile_path,
                                         Handle<mirror::ClassLoader> class_loader,
                                         bool add_to_queue);
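
  // Illustrative sketch (assumed call pattern, not part of this header): a
  // runtime that already has a profile for the loaded dex files can warm up
  // from it instead of waiting for methods to become hot again:
  //
  //   uint32_t enqueued = jit->CompileMethodsFromProfile(
  //       self, dex_files, profile_path, class_loader, /*add_to_queue=*/ true);
  //
  // With `add_to_queue` == false the listed methods are compiled directly on
  // the calling thread; with true they are only enqueued, and the return value
  // counts how many methods were added to the queue.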

  // Register the dex files to the JIT. This is to perform any compilation/optimization
  // at the point of loading the dex files.
  void RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                        jobject class_loader);

  // Called by the compiler to know whether it can directly encode the
  // method/class/string.
  bool CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
      REQUIRES_SHARED(Locks::mutator_lock_);
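
  // Illustrative sketch (assumed usage, not part of this header): the compiler
  // is expected to consult these before embedding a raw pointer in generated
  // code, e.g.
  //
  //   if (jit->CanEncodeClass(klass, /*is_for_shared_region=*/ true)) {
  //     // Emit the class pointer directly in the compiled code.
  //   } else {
  //     // Fall back to loading the class at runtime.
  //   }
  //
  // `is_for_shared_region` presumably distinguishes code destined for the
  // zygote's shared region, where only data visible to every process may be
  // embedded, from code private to a single process.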

  // Map boot image methods after all compilation in zygote has been done.
  void MapBootImageMethods() REQUIRES(Locks::mutator_lock_);

  // Notify other processes that the zygote is done profile-compiling boot
  // class path methods.
  void NotifyZygoteCompilationDone();

  EXPORT void EnqueueOptimizedCompilation(ArtMethod* method, Thread* self);

  EXPORT void MaybeEnqueueCompilation(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT static bool TryPatternMatch(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Jit(JitCodeCache* code_cache, JitOptions* options);

  // Whether we should not add hotness counts for the given method.
  bool IgnoreSamplesForMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Compile an individual method listed in a profile. If `add_to_queue` is
  // true and the method was resolved, return true. Otherwise return false.
  bool CompileMethodFromProfile(Thread* self,
                                ClassLinker* linker,
                                uint32_t method_idx,
                                Handle<mirror::DexCache> dex_cache,
                                Handle<mirror::ClassLoader> class_loader,
                                bool add_to_queue,
                                bool compile_after_boot)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool BindCompilerMethods(std::string* error_msg);

  void AddCompileTask(Thread* self,
                      ArtMethod* method,
                      CompilationKind compilation_kind);

  bool CompileMethodInternal(ArtMethod* method,
                             Thread* self,
                             CompilationKind compilation_kind,
                             bool prejit)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // JIT compiler
  EXPORT static JitCompilerInterface* jit_compiler_;

  // JIT resources owned by runtime.
  jit::JitCodeCache* const code_cache_;
  const JitOptions* const options_;

  std::unique_ptr<JitThreadPool> thread_pool_;
  std::vector<std::unique_ptr<OatDexFile>> type_lookup_tables_;

  Mutex boot_completed_lock_;
  bool boot_completed_ GUARDED_BY(boot_completed_lock_) = false;
  std::deque<Task*> tasks_after_boot_ GUARDED_BY(boot_completed_lock_);

  // Performance monitoring.
  CumulativeLogger cumulative_timings_;
  Histogram<uint64_t> memory_use_ GUARDED_BY(lock_);
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // In the JIT zygote configuration, after all compilation is done, the zygote
  // copies the boot image method contents into `zygote_mapping_methods_`, which
  // is then picked up by processes that map the memory in-place within the boot
  // image mapping.
  //
  // `zygote_mapping_methods_` is shared memory only usable by the zygote and not
  // inherited by child processes. We create it eagerly to make sure other
  // processes cannot seal the file writable.
  MemMap zygote_mapping_methods_;

  // The file descriptor created through memfd_create pointing to memory holding
  // boot image methods. Created by the zygote, and inherited by child
  // processes. The descriptor will be closed in each process (including the
  // zygote) once they don't need it.
  android::base::unique_fd fd_methods_;

  // The size of the memory pointed to by `fd_methods_`. Cached here to avoid
  // recomputing it.
  size_t fd_methods_size_;

  // Map of hotness counters for methods whose memory we want to share
  // between the zygote and apps.
  std::map<ArtMethod*, uint16_t> shared_method_counters_;

  friend class art::jit::JitCompileTask;

  DISALLOW_COPY_AND_ASSIGN(Jit);
};

// Helper class to stop the JIT for a given scope. This will wait for the JIT to quiesce.
class EXPORT ScopedJitSuspend {
 public:
  ScopedJitSuspend();
  ~ScopedJitSuspend();

 private:
  bool was_on_;
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_H_