/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/file_utils.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc/space/image_space_loading_order.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror

namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti

namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier

class ArenaPool;
class ArtMethod;
enum class CalleeSaveType : uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
enum class InstructionSet;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileAssistantTest;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
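  // Illustrative sketch (not part of this header) of a typical embedding sequence, assuming the
  // caller runs on the thread that will become the main thread and owns the option vector; most
  // embedders go through JNI_CreateJavaVM instead of calling this directly:
  //
  //   RuntimeOptions options;
  //   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
  //   if (!Runtime::Create(options, /*ignore_unrecognized=*/ false)) {
  //     return false;  // Option parsing or initialization failed.
  //   }
  //   Runtime* runtime = Runtime::Current();
  //   bool started = runtime->Start();  // May start threads and run managed code (see Start()).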
  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsSystemServer() const {
    return is_system_server_;
  }

  void SetSystemServer(bool value) {
    is_system_server_ = value;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  bool IsUsingApexBootImageLocation() const {
    return is_using_apex_boot_image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;
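  // Illustrative sketch (not part of this header) of the shutdown-safe thread-birth protocol
  // exposed by IsShuttingDownLocked() / StartThreadBirth() / EndThreadBirth() above, assuming
  // `self` is the current Thread*:
  //
  //   Runtime* runtime = Runtime::Current();
  //   bool create_thread;
  //   {
  //     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  //     create_thread = !runtime->IsShuttingDownLocked();
  //     if (create_thread) {
  //       runtime->StartThreadBirth();  // Shutdown waits until the count drops back to zero.
  //     }
  //   }
  //   // ... spawn the native thread; once it has registered (or on failure), balance with
  //   // EndThreadBirth() while holding Locks::runtime_shutdown_lock_ again.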
  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::vector<std::string>& GetBootClassPath() const {
    return boot_class_path_;
  }

  const std::vector<std::string>& GetBootClassPathLocations() const {
    DCHECK(boot_class_path_locations_.empty() ||
           boot_class_path_locations_.size() == boot_class_path_.size());
    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  unsigned int GetFinalizerTimeoutMs() const {
    return finalizer_timeout_ms_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
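  // Illustrative sketch (not part of this header) of attaching a native thread via
  // AttachCurrentThread()/DetachCurrentThread() above; the thread name "my-worker" is purely
  // hypothetical:
  //
  //   Runtime* runtime = Runtime::Current();
  //   bool attached = runtime->AttachCurrentThread("my-worker",
  //                                                /*as_daemon=*/ true,
  //                                                runtime->GetSystemThreadGroup(),
  //                                                /*create_peer=*/ true);
  //   if (attached) {
  //     // ... the thread may now use Thread::Current() and call into managed code ...
  //     runtime->DetachCurrentThread();
  //   }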
  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It's false when we broadcast to unblock blocked threads after system
  // weak access is reenabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
  // system weak is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
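  // Illustrative sketch (not part of this header) of the SweepSystemWeaks() contract described
  // above: the collector passes a visitor that returns the (possibly moved) object for live
  // entries and null for dead ones. The visitor class and IsStillReachable() below are
  // hypothetical; the exact IsMarkedVisitor interface lives elsewhere in the GC headers.
  //
  //   class SweepDeadEntriesVisitor : public IsMarkedVisitor {
  //    public:
  //     mirror::Object* IsMarked(mirror::Object* obj) override {
  //       return IsStillReachable(obj) ? obj : nullptr;  // Null means "delete this weak entry".
  //     }
  //   };
  //   ...
  //   SweepDeadEntriesVisitor visitor;
  //   Runtime::Current()->SweepSystemWeaks(&visitor);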
  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void PostZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
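  // Illustrative sketch (not part of this header) of how the transaction API above and below is
  // typically driven, assuming the caller is an AOT compiler initializing a class at compile
  // time:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->EnterTransactionMode();
  //   // ... attempt the class initialization; field/array/intern writes are recorded ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();  // Undo the recorded writes.
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }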
  // Transaction rollback and exit transaction are always done together; it's convenient to
  // do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message);

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    core_platform_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
    return core_platform_api_policy_;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }
  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

  const std::string& GetProcessDataDirectory() const {
    return process_data_directory_;
  }

  void SetProcessDataDirectory(const char* data_dir) {
    if (data_dir == nullptr) {
      process_data_directory_.clear();
    } else {
      process_data_directory_ = data_dir;
    }
  }

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(uint32_t version) {
    target_sdk_version_ = version;
  }

  uint32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  void CreateJitCodeCache(bool rwx_memory_allowed);

  // Create the JIT, its instrumentation, and the JIT code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);
  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreNonStandardExitsEnabled() const {
    return non_standard_exits_enabled_;
  }

  void SetNonStandardExitsEnabled() {
    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
  }

  // Change state and re-check which interpreter should be used.
  //
  // This must be called whenever there is an event that forces
  // us to use a different interpreter (e.g. a debugger is attached).
  //
  // Changing the state using the lambda gives us some multithreading safety.
  // It ensures that two calls do not interfere with each other and
  // it makes it possible to DCHECK that the thread-local flag is correct.
  template<typename Action>
  static void DoAndMaybeSwitchInterpreter(Action lambda);

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
  // For testing purposes only.
  // TODO: Remove this when this is no longer needed (b/116087961).
  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc, or a low 4gb version if we are a 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  bool IsSafeMode() const {
    return safe_mode_;
  }

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }
  // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  bool GetDumpGCPerformanceOnShutdown() const {
    return dump_gc_performance_on_shutdown_;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  uint32_t GetVerifierLoggingThresholdMs() const {
    return verifier_logging_threshold_ms_;
  }

  // Atomically delete the thread pool if the reference count is 0.
  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Wait for all the thread workers to be attached.
  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Scoped usage of the runtime thread pool. Prevents the pool from being
  // deleted. Note that the thread pool is only for startup and gets deleted after.
  class ScopedThreadPoolUsage {
   public:
    ScopedThreadPoolUsage();
    ~ScopedThreadPoolUsage();

    // Return the thread pool.
    ThreadPool* GetThreadPool() const {
      return thread_pool_;
    }

   private:
    ThreadPool* const thread_pool_;
  };

  bool LoadAppImageStartupCache() const {
    return load_app_image_startup_cache_;
  }

  void SetLoadAppImageStartupCacheEnabled(bool enabled) {
    load_app_image_startup_cache_ = enabled;
  }

  // Notify the runtime that application startup is considered completed. Only has effect for the
  // first call.
  void NotifyStartupCompleted();
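  // Illustrative sketch (not part of this header) of borrowing the startup thread pool through
  // the ScopedThreadPoolUsage RAII helper above; since the pool only exists during startup,
  // GetThreadPool() may return null once it has been deleted:
  //
  //   Runtime::ScopedThreadPoolUsage stpu;
  //   ThreadPool* pool = stpu.GetThreadPool();
  //   if (pool != nullptr) {
  //     // ... add tasks and run them; the pool cannot be deleted while `stpu` is alive ...
  //   }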
  // Return true if startup is already completed.
  bool GetStartupCompleted() const;

  gc::space::ImageSpaceLoadingOrder GetImageSpaceLoadingOrder() const {
    return image_space_loading_order_;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
  //       As such, there is a window where a call will return an empty string. In general,
  //       only aborting code should retrieve this data (via the GetFaultMessageForAbortLogging
  //       friend).
  std::string GetFaultMessage();

  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  // Pre-allocated exceptions (see Runtime::Init).
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;
  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool is_system_server_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;
  bool is_using_apex_boot_image_location_;

  std::vector<std::string> boot_class_path_;
  std::vector<std::string> boot_class_path_locations_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  // Finalizers running for longer than this many milliseconds abort the runtime.
  unsigned int finalizer_timeout_ms_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Runtime thread pool. The pool is only for startup and gets deleted after.
  std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
  size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);

  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
  // lock-free, so needs to be atomic.
  std::atomic<std::string*> fault_message_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;
  // Flag that tells us whether the runtime has finished starting. If
  // this flag is set then the daemon threads and the class loader
  // have been created. This flag is needed for knowing if it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM.
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transactions used for pre-initializing classes at compilation time.
  // Support nested transactions, maintain a list containing all transactions. Transactions are
  // handled under a stack discipline. Because the GC needs to go over all transactions, we use a
  // list as the underlying data structure instead of a stack.
  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  uint32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;     // NullPointer checks are implicit.
  bool implicit_so_checks_;       // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;  // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is,
  // if standard dlopen fails to load the native library associated with a native activity, it
  // calls the native bridge to load it and then gets the trampoline for the entry to the native
  // activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under a native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether anything is going to be using the shadow-frame APIs to force a function to return
  // early. Doing this requires that (1) we be debuggable and (2) that mterp is exited.
  bool non_standard_exits_enabled_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  hiddenapi::EnforcementPolicy hidden_api_policy_;

  // Whether access checks on core platform API should be performed.
  hiddenapi::EnforcementPolicy core_platform_api_policy_;

  // List of signature prefixes of methods that have been removed from the blacklist, and treated
  // as if whitelisted.
  std::vector<std::string> hidden_api_exemptions_;

  // Do not warn about the same hidden API access violation twice.
  // This is only used for testing.
  bool dedupe_hidden_api_warnings_;

  // How often to log hidden API access to the event log. An integer between 0
  // (never) and 0x10000 (always).
  uint32_t hidden_api_access_event_log_rate_;

  // The package of the app running in this process.
  std::string process_package_name_;

  // The data directory of the app running in this process.
  std::string process_data_directory_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing requested jdwp options.
  std::string jdwp_options_;

  // The jdwp provider we were configured with.
  JdwpProvider jdwp_provider_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  MemMap protected_fault_page_;

  uint32_t verifier_logging_threshold_ms_;

  bool load_app_image_startup_cache_ = false;

  // Set when startup has completed; this can happen at most once.
  std::atomic<bool> startup_completed_ = false;

  gc::space::ImageSpaceLoadingOrder image_space_loading_order_ =
      gc::space::ImageSpaceLoadingOrder::kSystemFirst;

  // Note: See comments on GetFaultMessage.
  friend std::string GetFaultMessageForAbortLogging();
  friend class ScopedThreadPoolUsage;
  friend class OatFileAssistantTest;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_