/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#include <limits.h>  // for INT_MAX
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <atomic>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>
#include <sstream>

#include "android-base/file.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"

#include "unwindstack/AndroidUnwinder.h"

#include "arch/context-inl.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/atomic.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "base/utils.h"
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "debugger.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "interpreter/shadow_frame-inl.h"
#include "java_frame_root_info.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_frame_info.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "monitor_objects_stack_visitor.h"
#include "native_stack_dump.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_utf_chars.h"
#include "nterp_helpers.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "object_lock.h"
#include "palette/palette.h"
#include "quick/quick_method_frame_info.h"
#include "quick_exception_handler.h"
#include "read_barrier-inl.h"
#include "reflection.h"
#include "reflective_handle_scope-inl.h"
#include "runtime-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_disable_public_sdk_checker.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "trace.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
#include "well_known_classes-inl.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

#pragma clang diagnostic push
#pragma clang diagnostic error "-Wconversion"

extern "C" __attribute__((weak)) void*
__hwasan_tag_pointer(const volatile void* p, unsigned char tag);

namespace art {

using android::base::StringAppendV;
using android::base::StringPrintf;

extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize =
    GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
Thread* Thread::jit_sensitive_thread_ = nullptr;
#ifndef __BIONIC__
thread_local Thread* Thread::self_tls_ = nullptr;
#endif

static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;

// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}

void InitEntryPoints(JniEntryPoints* jpoints,
                     QuickEntryPoints* qpoints,
                     bool monitor_jni_entry_exit);
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
  CHECK(gUseReadBarrier);
  tls32_.is_gc_marking = is_marking;
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
}

void Thread::InitTlsEntryPoints() {
  ScopedTrace trace("InitTlsEntryPoints");
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  bool monitor_jni_entry_exit = false;
  PaletteShouldReportJniInvocations(&monitor_jni_entry_exit);
  if (monitor_jni_entry_exit) {
    AtomicSetFlag(ThreadFlag::kMonitorJniEntryExit);
  }
  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints, monitor_jni_entry_exit);
}

void Thread::ResetQuickAllocEntryPointsForThread() {
  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
}

class DeoptimizationContextRecord {
 public:
  DeoptimizationContextRecord(const JValue& ret_val,
                              bool is_reference,
                              bool from_code,
                              ObjPtr<mirror::Throwable> pending_exception,
                              DeoptimizationMethodType method_type,
                              DeoptimizationContextRecord* link)
      : ret_val_(ret_val),
        is_reference_(is_reference),
        from_code_(from_code),
        pending_exception_(pending_exception.Ptr()),
        deopt_method_type_(method_type),
        link_(link) {}

  JValue GetReturnValue() const { return ret_val_; }
  bool IsReference() const { return is_reference_; }
  bool GetFromCode() const { return from_code_; }
  ObjPtr<mirror::Throwable> GetPendingException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return pending_exception_;
  }
  DeoptimizationContextRecord* GetLink() const { return link_; }
  mirror::Object** GetReturnValueAsGCRoot() {
    DCHECK(is_reference_);
    return ret_val_.GetGCRoot();
  }
  mirror::Object** GetPendingExceptionAsGCRoot() {
    return reinterpret_cast<mirror::Object**>(&pending_exception_);
  }
  DeoptimizationMethodType GetDeoptimizationMethodType() const {
    return deopt_method_type_;
  }

 private:
  // The value returned by the method at the top of the stack before deoptimization.
  JValue ret_val_;

  // Indicates whether the returned value is a reference. If so, the GC will visit it.
  const bool is_reference_;

  // Whether the context was created from an explicit deoptimization in the code.
  const bool from_code_;

  // The exception that was pending before deoptimization (or null if there was no pending
  // exception).
  mirror::Throwable* pending_exception_;

  // Whether the context was created for an (idempotent) runtime method.
  const DeoptimizationMethodType deopt_method_type_;

  // A link to the previous DeoptimizationContextRecord.
  DeoptimizationContextRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
};

class StackedShadowFrameRecord {
 public:
  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
                           StackedShadowFrameType type,
                           StackedShadowFrameRecord* link)
      : shadow_frame_(shadow_frame),
        type_(type),
        link_(link) {}

  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  StackedShadowFrameType GetType() const { return type_; }
  StackedShadowFrameRecord* GetLink() const { return link_; }

 private:
  ShadowFrame* const shadow_frame_;
  const StackedShadowFrameType type_;
  StackedShadowFrameRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
};

void Thread::PushDeoptimizationContext(const JValue& return_value,
                                       bool is_reference,
                                       ObjPtr<mirror::Throwable> exception,
                                       bool from_code,
                                       DeoptimizationMethodType method_type) {
  DCHECK(exception != Thread::GetDeoptimizationException());
  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
      return_value,
      is_reference,
      from_code,
      exception,
      method_type,
      tlsPtr_.deoptimization_context_stack);
  tlsPtr_.deoptimization_context_stack = record;
}

void Thread::PopDeoptimizationContext(JValue* result,
                                      ObjPtr<mirror::Throwable>* exception,
                                      bool* from_code,
                                      DeoptimizationMethodType* method_type) {
  AssertHasDeoptimizationContext();
  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
  tlsPtr_.deoptimization_context_stack = record->GetLink();
  result->SetJ(record->GetReturnValue().GetJ());
  *exception = record->GetPendingException();
  *from_code = record->GetFromCode();
  *method_type = record->GetDeoptimizationMethodType();
  delete record;
}

void Thread::AssertHasDeoptimizationContext() {
  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
      << "No deoptimization context for thread " << *this;
}

enum {
  kPermitAvailable = 0,  // Incrementing consumes the permit
  kNoPermit = 1,  // Incrementing marks as waiter waiting
  kNoPermitWaiterWaiting = 2
};

void Thread::Park(bool is_absolute, int64_t time) {
  DCHECK(this == Thread::Current());
#if ART_USE_FUTEXES
  // Consume the permit, or mark as waiting. This cannot cause park_state to go
  // outside of its valid range (0, 1, 2), because in all cases where 2 is
  // assigned it is set back to 1 before returning, and this method cannot run
  // concurrently with itself since it operates on the current thread.
  int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
  if (old_state == kNoPermit) {
    // no permit was available. block thread until later.
    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time);
    bool timed_out = false;
    if (!is_absolute && time == 0) {
      // Thread.getState() is documented to return waiting for untimed parks.
      ScopedThreadSuspension sts(this, ThreadState::kWaiting);
      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
      int result = futex(tls32_.park_state_.Address(),
                         FUTEX_WAIT_PRIVATE,
                         /* sleep if val = */ kNoPermitWaiterWaiting,
                         /* timeout */ nullptr,
                         nullptr,
                         0);
      // This errno check must happen before the scope is closed, to ensure that
      // no destructors (such as ScopedThreadSuspension) overwrite errno.
      if (result == -1) {
        switch (errno) {
          case EAGAIN:
            FALLTHROUGH_INTENDED;
          case EINTR: break;  // park() is allowed to spuriously return
          default: PLOG(FATAL) << "Failed to park";
        }
      }
    } else if (time > 0) {
      // Only actually suspend and futex_wait if we're going to wait for some
      // positive amount of time - the kernel will reject negative times with
      // EINVAL, and a zero time will just noop.

      // Thread.getState() is documented to return timed wait for timed parks.
      ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
      timespec timespec;
      int result = 0;
      if (is_absolute) {
        // Time is millis when scheduled for an absolute time
        timespec.tv_nsec = (time % 1000) * 1000000;
        timespec.tv_sec = SaturatedTimeT(time / 1000);
        // This odd looking pattern is recommended by futex documentation to
        // wait until an absolute deadline, with otherwise identical behavior to
        // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
        // correct time when the system clock changes.
        result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
                       /* sleep if val = */ kNoPermitWaiterWaiting,
                       &timespec,
                       nullptr,
                       static_cast<int>(FUTEX_BITSET_MATCH_ANY));
      } else {
        // Time is nanos when scheduled for a relative time
        timespec.tv_sec = SaturatedTimeT(time / 1000000000);
        timespec.tv_nsec = time % 1000000000;
        result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAIT_PRIVATE,
                       /* sleep if val = */ kNoPermitWaiterWaiting,
                       &timespec,
                       nullptr,
                       0);
      }
      // This errno check must happen before the scope is closed, to ensure that
      // no destructors (such as ScopedThreadSuspension) overwrite errno.
      if (result == -1) {
        switch (errno) {
          case ETIMEDOUT:
            timed_out = true;
            FALLTHROUGH_INTENDED;
          case EAGAIN:
          case EINTR: break;  // park() is allowed to spuriously return
          default: PLOG(FATAL) << "Failed to park";
        }
      }
    }
    // Mark as no longer waiting, and consume permit if there is one.
    tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
    // TODO: Call to signal jvmti here
    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
  } else {
    // the fetch_add has consumed the permit. immediately return.
    DCHECK_EQ(old_state, kPermitAvailable);
  }
#else
#pragma clang diagnostic push
#pragma clang diagnostic warning "-W#warnings"
#warning "LockSupport.park/unpark implemented as noops without FUTEX support."
#pragma clang diagnostic pop
  UNUSED(is_absolute, time);
  UNIMPLEMENTED(WARNING);
  sched_yield();
#endif
}

void Thread::Unpark() {
#if ART_USE_FUTEXES
  // Set permit available; will be consumed either by fetch_add (when the thread
  // tries to park) or store (when the parked thread is woken up)
  if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
      == kNoPermitWaiterWaiting) {
    int result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAKE_PRIVATE,
                       /* number of waiters = */ 1,
                       nullptr,
                       nullptr,
                       0);
    if (result == -1) {
      PLOG(FATAL) << "Failed to unpark";
    }
  }
#else
  UNIMPLEMENTED(WARNING);
#endif
}

void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
      sf, type, tlsPtr_.stacked_shadow_frame_record);
  tlsPtr_.stacked_shadow_frame_record = record;
}

ShadowFrame* Thread::MaybePopDeoptimizedStackedShadowFrame() {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  if (record == nullptr ||
      record->GetType() != StackedShadowFrameType::kDeoptimizationShadowFrame) {
    return nullptr;
  }
  return PopStackedShadowFrame();
}

ShadowFrame* Thread::PopStackedShadowFrame() {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  DCHECK_NE(record, nullptr);
  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
  ShadowFrame* shadow_frame = record->GetShadowFrame();
  delete record;
  return shadow_frame;
}

class FrameIdToShadowFrame {
 public:
  static FrameIdToShadowFrame* Create(size_t frame_id,
                                      ShadowFrame* shadow_frame,
                                      FrameIdToShadowFrame* next,
                                      size_t num_vregs) {
    // Append a bool array at the end to keep track of what vregs are updated by the debugger.
    uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
    return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
  }

  static void Delete(FrameIdToShadowFrame* f) {
    uint8_t* memory = reinterpret_cast<uint8_t*>(f);
    delete[] memory;
  }

  size_t GetFrameId() const { return frame_id_; }
  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  FrameIdToShadowFrame* GetNext() const { return next_; }
  void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
  bool* GetUpdatedVRegFlags() { return updated_vreg_flags_; }

 private:
  FrameIdToShadowFrame(size_t frame_id, ShadowFrame* shadow_frame, FrameIdToShadowFrame* next)
      : frame_id_(frame_id),
        shadow_frame_(shadow_frame),
        next_(next) {}

  const size_t frame_id_;
  ShadowFrame* const shadow_frame_;
  FrameIdToShadowFrame* next_;
  bool updated_vreg_flags_[0];

  DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
};

static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
                                                      size_t frame_id) {
  FrameIdToShadowFrame* found = nullptr;
  for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      if (kIsDebugBuild) {
        // Check we have at most one record for this frame.
        CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
        found = record;
      } else {
        return record;
      }
    }
  }
  return found;
}

ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  if (record != nullptr) {
    return record->GetShadowFrame();
  }
  return nullptr;
}

// Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  CHECK(record != nullptr);
  return record->GetUpdatedVRegFlags();
}

ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                                     uint32_t num_vregs,
                                                     ArtMethod* method,
                                                     uint32_t dex_pc) {
  ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    return shadow_frame;
  }
  VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
  shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, method, dex_pc);
  FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
                                                              shadow_frame,
                                                              tlsPtr_.frame_id_to_shadow_frame,
                                                              num_vregs);
  for (uint32_t i = 0; i < num_vregs; i++) {
    // Do this to clear all references for root visitors.
    shadow_frame->SetVRegReference(i, nullptr);
    // This flag will be changed to true if the debugger modifies the value.
    record->GetUpdatedVRegFlags()[i] = false;
  }
  tlsPtr_.frame_id_to_shadow_frame = record;
  return shadow_frame;
}

TLSData* Thread::GetCustomTLS(const char* key) {
  MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
  auto it = custom_tls_.find(key);
  return (it != custom_tls_.end()) ? it->second.get() : nullptr;
}

void Thread::SetCustomTLS(const char* key, TLSData* data) {
  // We will swap the old data (which might be nullptr) with this and then delete it outside of the
  // custom_tls_lock_.
  std::unique_ptr<TLSData> old_data(data);
  {
    MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
    custom_tls_.GetOrCreate(key, []() { return std::unique_ptr<TLSData>(); }).swap(old_data);
  }
}

void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
  FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
  if (head->GetFrameId() == frame_id) {
    tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
    FrameIdToShadowFrame::Delete(head);
    return;
  }
  FrameIdToShadowFrame* prev = head;
  for (FrameIdToShadowFrame* record = head->GetNext();
       record != nullptr;
       prev = record, record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      prev->SetNext(record->GetNext());
      FrameIdToShadowFrame::Delete(record);
      return;
    }
  }
  LOG(FATAL) << "No shadow frame for frame " << frame_id;
  UNREACHABLE();
}

void Thread::InitTid() {
  tls32_.tid = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void Thread::DeleteJPeer(JNIEnv* env) {
  // Make sure nothing can observe both opeer and jpeer set at the same time.
  jobject old_jpeer = tlsPtr_.jpeer;
  CHECK(old_jpeer != nullptr);
  tlsPtr_.jpeer = nullptr;
  env->DeleteGlobalRef(old_jpeer);
}

void* Thread::CreateCallbackWithUffdGc(void* arg) {
  return Thread::CreateCallback(arg);
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return nullptr;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    //       after self->Init().
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDownLocked());
    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
    //       a mess in InitStackHwm.
    //       We do not have a reasonable way to recover from that, so abort
    //       the runtime in such a case. In case this ever changes, we need to make sure here to
    //       delete the tmp_jni_env, as we own it at this point.
    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
    self->tlsPtr_.tmp_jni_env = nullptr;
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);
    self->InitStringEntryPoints();

    // Copy peer into self, deleting global reference when done.
    CHECK(self->tlsPtr_.jpeer != nullptr);
    self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
    // Make sure nothing can observe both opeer and jpeer set at the same time.
    self->DeleteJPeer(self->GetJniEnv());
    self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());

    ArtField* priorityField = WellKnownClasses::java_lang_Thread_priority;
    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));

    runtime->GetRuntimeCallbacks()->ThreadStart(self);

    // Unpark ourselves if the java peer was unparked before it started (see
    // b/28845097#comment49 for more information)
    ArtField* unparkedField = WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
    bool should_unpark = false;
    {
      // Hold the lock here, so that if another thread calls unpark before the thread starts
      // we don't observe the unparkedBeforeStart field before the unparker writes to it,
      // which could cause a lost unpark.
      art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
      should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
    }
    if (should_unpark) {
      self->Unpark();
    }
    // Invoke the 'run' method of our java.lang.Thread.
    ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
    WellKnownClasses::java_lang_Thread_run->InvokeVirtual<'V'>(self, receiver);
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self, /* should_run_callbacks= */ true);

  return nullptr;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  ObjPtr<mirror::Object> thread_peer) {
  ArtField* f = WellKnownClasses::java_lang_Thread_nativePeer;
  Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
  // Check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != nullptr && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // Under sanitization, frames of the interpreter may become bigger, both for C code as
  // well as the ShadowFrame. Ensure a larger minimum size. Otherwise initialization
  // of all core classes cannot be done in all test circumstances.
  if (kMemoryToolIsAvailable) {
    stack_size = std::max(2 * MB, stack_size);
  }

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (Runtime::Current()->GetImplicitStackOverflowChecks()) {
    // If we are going to use implicit stack checks, allocate space for the protected
    // region at the bottom of the stack.
    stack_size += Thread::kStackOverflowImplicitCheckSize +
        GetStackOverflowReservedBytes(kRuntimeISA);
  } else {
    // It's likely that callers are trying to ensure they have at least a certain amount of
    // stack space, so we should add our reserved space on top of what they requested, rather
    // than implicitly take it away from them.
    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

// Return the nearest page-aligned address below the current stack top.
NO_INLINE
static uint8_t* FindStackTop() {
  return reinterpret_cast<uint8_t*>(AlignDown(__builtin_frame_address(0), kPageSize));
}

// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_begin_.
ATTRIBUTE_NO_SANITIZE_ADDRESS
void Thread::InstallImplicitProtection() {
  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  // Page containing current top of stack.
  uint8_t* stack_top = FindStackTop();

  // Try to directly protect the stack.
  VLOG(threads) << "installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
  if (ProtectStack(/* fatal_on_error= */ false)) {
    // Tell the kernel that we won't be needing these pages any more.
    // NB. madvise will probably write zeroes into the memory (on linux it does).
    size_t unwanted_size =
        reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
    madvise(pregion, unwanted_size, MADV_DONTNEED);
    return;
  }

  // There is a little complexity here that deserves a special mention. On some
  // architectures, the stack is created using a VM_GROWSDOWN flag
  // to prevent memory being allocated when it's not needed. This flag makes the
  // kernel only allocate memory for the stack by growing down in memory. Because we
  // want to put an mprotected region far away from that at the stack top, we need
  // to make sure the pages for the stack are mapped in before we call mprotect.
  //
  // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
  // with a non-mapped stack (usually only the main thread).
  //
  // We map in the stack by reading every page from the stack bottom (highest address)
  // to the stack top. (We then madvise this away.) This must be done by reading from the
  // current stack pointer downwards.
  //
  // Accesses too far below the current machine register corresponding to the stack pointer (e.g.,
  // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We
  // thus have to move the stack pointer. We do this portably by using a recursive function with a
  // large stack frame size.

  // (Defensively) first remove the protection on the protected region as we'll want to read
  // and write it. Ignore errors.
  UnprotectStack();

  VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
      static_cast<void*>(pregion);

  struct RecurseDownStack {
    // This function has an intentionally large stack size.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-larger-than="
    NO_INLINE
    __attribute__((no_sanitize("memtag"))) static void Touch(uintptr_t target) {
      volatile size_t zero = 0;
      // Use a large local volatile array to ensure a large frame size. Do not use anything close
      // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
      // there is no pragma support for this.
      // Note: for ASAN we need to shrink the array a bit, as there's other overhead.
      constexpr size_t kAsanMultiplier =
#ifdef ADDRESS_SANITIZER
          2u;
#else
          1u;
#endif
      // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
      // auto-initialize this local variable).
      volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
      char sink ATTRIBUTE_UNUSED = space[zero];  // NOLINT
      // Remove tag from the pointer. Nop in non-hwasan builds.
      uintptr_t addr = reinterpret_cast<uintptr_t>(
          __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
      if (addr >= target + kPageSize) {
        Touch(target);
      }
      zero *= 2;  // Try to avoid tail recursion.
    }
#pragma GCC diagnostic pop
  };
  RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion));

  VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);

  // Protect the bottom of the stack to prevent read/write to it.
  ProtectStack(/* fatal_on_error= */ true);

  // Tell the kernel that we won't be needing these pages any more.
  // NB. madvise will probably write zeroes into the memory (on linux it does).
  size_t unwanted_size =
      reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
  madvise(pregion, unwanted_size, MADV_DONTNEED);
}

template <bool kSupportTransaction>
static void SetNativePeer(ObjPtr<mirror::Object> java_peer, Thread* thread)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = WellKnownClasses::java_lang_Thread_nativePeer;
  if (kSupportTransaction && Runtime::Current()->IsActiveTransaction()) {
    field->SetLong</*kTransactionActive=*/ true>(java_peer, reinterpret_cast<jlong>(thread));
  } else {
    field->SetLong</*kTransactionActive=*/ false>(java_peer, reinterpret_cast<jlong>(thread));
  }
}

static void SetNativePeer(JNIEnv* env, jobject java_peer, Thread* thread) {
  ScopedObjectAccess soa(env);
  SetNativePeer</*kSupportTransaction=*/ false>(soa.Decode<mirror::Object>(java_peer), thread);
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != nullptr);
  Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf();

  if (VLOG_IS_ON(threads)) {
    ScopedObjectAccess soa(env);

    ArtField* f = WellKnownClasses::java_lang_Thread_name;
    ObjPtr<mirror::String> java_name =
        f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
    std::string thread_name;
    if (java_name != nullptr) {
      thread_name = java_name->ToModifiedUtf8();
    } else {
      thread_name = "(Unnamed)";
    }

    VLOG(threads) << "Creating native thread for " << thread_name;
    self->Dump(LOG_STREAM(INFO));
  }

  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
  // to assign it.
  SetNativePeer(env, java_peer, child_thread);

  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
  // do not have a good way to report this on the child's side.
  std::string error_msg;
  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));

  int pthread_create_result = 0;
  if (child_jni_env_ext.get() != nullptr) {
    pthread_t new_pthread;
    pthread_attr_t attr;
    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
    pthread_create_result = pthread_create(&new_pthread,
                                           &attr,
                                           gUseUserfaultfd ? Thread::CreateCallbackWithUffdGc
                                                           : Thread::CreateCallback,
                                           child_thread);
    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

    if (pthread_create_result == 0) {
      // pthread_create started the new thread. The child is now responsible for managing the
      // JNIEnvExt we created.
      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
      //       between the threads.
      child_jni_env_ext.release();  // NOLINT pthreads API.
      return;
    }
  }

  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    runtime->EndThreadBirth();
  }
  // Manually delete the global reference since Thread::Init will not have been run. Make sure
  // nothing can observe both opeer and jpeer set at the same time.
  child_thread->DeleteJPeer(env);
  delete child_thread;
  child_thread = nullptr;
  // TODO: remove from thread group?
  SetNativePeer(env, java_peer, nullptr);
  {
    std::string msg(child_jni_env_ext.get() == nullptr ?
        StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
        StringPrintf("pthread_create (%s stack) failed: %s",
                     PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
    ScopedObjectAccess soa(env);
    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
  }
}

bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == nullptr);

  // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  tlsPtr_.pthread_self = pthread_self();
  CHECK(is_started_);

  ScopedTrace trace("Thread::Init");

  SetUpAlternateSignalStack();
  if (!InitStackHwm()) {
    return false;
  }
  InitCpu();
  InitTlsEntryPoints();
  RemoveSuspendTrigger();
  InitCardTable();
  InitTid();

#ifdef __BIONIC__
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  Thread::self_tls_ = this;
#endif
  DCHECK_EQ(Thread::Current(), this);

  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);

  if (jni_env_ext != nullptr) {
    DCHECK_EQ(jni_env_ext->GetVm(), java_vm);
    DCHECK_EQ(jni_env_ext->GetSelf(), this);
    tlsPtr_.jni_env = jni_env_ext;
  } else {
    std::string error_msg;
    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
    if (tlsPtr_.jni_env == nullptr) {
      LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
      return false;
    }
  }

  ScopedTrace trace3("ThreadList::Register");
  thread_list->Register(this);
  return true;
}

template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       PeerAction peer_action,
                       bool should_run_callbacks) {
  Runtime* runtime = Runtime::Current();
  ScopedTrace trace("Thread::Attach");
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
        ((thread_name != nullptr) ? thread_name : "(Unnamed)");
    return nullptr;
  }
  Thread* self;
  {
    ScopedTrace trace2("Thread birth");
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
          ((thread_name != nullptr) ? thread_name : "(Unnamed)");
      return nullptr;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
      if (!init_success) {
        delete self;
        return nullptr;
      }
    }
  }

  self->InitStringEntryPoints();

  CHECK_NE(self->GetState(), ThreadState::kRunnable);
  self->SetState(ThreadState::kNative);

  // Run the action that is acting on the peer.
  if (!peer_action(self)) {
    runtime->GetThreadList()->Unregister(self, should_run_callbacks);
    // Unregister deletes self, no need to do this here.
    return nullptr;
  }

  if (VLOG_IS_ON(threads)) {
    if (thread_name != nullptr) {
      VLOG(threads) << "Attaching thread " << thread_name;
    } else {
      VLOG(threads) << "Attaching unnamed thread.";
    }
    ScopedObjectAccess soa(self);
    self->Dump(LOG_STREAM(INFO));
  }

  if (should_run_callbacks) {
    ScopedObjectAccess soa(self);
    runtime->GetRuntimeCallbacks()->ThreadStart(self);
  }

  return self;
}

Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       jobject thread_group,
                       bool create_peer,
                       bool should_run_callbacks) {
  auto create_peer_action = [&](Thread* self) {
    // If we're the main thread, ClassLinker won't be created until after we're attached,
    // so that thread needs a two-stage attach. Regular threads don't need this hack.
    // In the compiler, all threads need this hack, because no-one's going to be getting
    // a native peer!
    if (create_peer) {
      self->CreatePeer(thread_name, as_daemon, thread_group);
      if (self->IsExceptionPending()) {
        // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
        // the failure but do not dump the exception details. If we fail to allocate the peer, we
        // usually also fail to allocate an exception object and throw a pre-allocated OOME without
        // any useful information.
        // If we do manage to allocate the exception object, the memory information in the message
        // could have been collected too late and therefore misleading.
        {
          ScopedObjectAccess soa(self);
          LOG(ERROR) << "Exception creating thread peer: "
                     << ((thread_name != nullptr) ? thread_name : "<null>");
          self->ClearException();
        }
        return false;
      }
    } else {
      // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
      if (thread_name != nullptr) {
        self->SetCachedThreadName(thread_name);
        ::art::SetThreadName(thread_name);
      } else if (self->GetJniEnv()->IsCheckJniEnabled()) {
        LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
      }
    }
    return true;
  };
  return Attach(thread_name, as_daemon, create_peer_action, should_run_callbacks);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
  auto set_peer_action = [&](Thread* self) {
    // Install the given peer.
    DCHECK(self == Thread::Current());
    ScopedObjectAccess soa(self);
    ObjPtr<mirror::Object> peer = soa.Decode<mirror::Object>(thread_peer);
    self->tlsPtr_.opeer = peer.Ptr();
    SetNativePeer</*kSupportTransaction=*/ false>(peer, self);
    return true;
  };
  return Attach(thread_name, as_daemon, set_peer_action, /* should_run_callbacks= */ true);
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  ScopedObjectAccess soa(self);
  StackHandleScope<4u> hs(self);
  DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
  Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
      thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
  Handle<mirror::String> thread_name = hs.NewHandle(
      name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
    CHECK(self->IsExceptionPending());
    return;
  }
  jint thread_priority = GetNativePriority();

  DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
  Handle<mirror::Object> peer =
      hs.NewHandle(WellKnownClasses::java_lang_Thread->AllocObject(self));
  if (UNLIKELY(peer == nullptr)) {
    CHECK(IsExceptionPending());
    return;
  }
  tlsPtr_.opeer = peer.Get();
  WellKnownClasses::java_lang_Thread_init->InvokeInstance<'V', 'L', 'L', 'I', 'Z'>(
      self, peer.Get(), thr_group.Get(), thread_name.Get(), thread_priority, as_daemon);
  if (self->IsExceptionPending()) {
    return;
  }

  SetNativePeer</*kSupportTransaction=*/ false>(peer.Get(), self);

  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
  if (peer_thread_name == nullptr) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    if (runtime->IsActiveTransaction()) {
      InitPeer<true>(tlsPtr_.opeer, as_daemon, thr_group.Get(), thread_name.Get(),
                     thread_priority);
    } else {
      InitPeer<false>(tlsPtr_.opeer, as_daemon, thr_group.Get(), thread_name.Get(),
                      thread_priority);
    }
    peer_thread_name.Assign(GetThreadName());
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name != nullptr) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

ObjPtr<mirror::Object> Thread::CreateCompileTimePeer(const char* name,
                                                     bool as_daemon,
                                                     jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(!runtime->IsStarted());
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  ScopedObjectAccessUnchecked soa(self);
  StackHandleScope<3u> hs(self);
  DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
  Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
      thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
  Handle<mirror::String> thread_name = hs.NewHandle(
      name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
    CHECK(self->IsExceptionPending());
    return nullptr;
  }
  jint thread_priority = kNormThreadPriority;  // Always normalize to NORM priority.

  DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
  Handle<mirror::Object> peer = hs.NewHandle(
      WellKnownClasses::java_lang_Thread->AllocObject(self));
  if (peer == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }

  // We cannot call Thread.init, as it will recursively ask for currentThread.

  // The Thread constructor should have set the Thread.name to a
  // non-null value. However, because we can run without code
  // available (in the compiler, in tests), we manually assign the
  // fields the constructor should have set.
  if (runtime->IsActiveTransaction()) {
    InitPeer<true>(peer.Get(), as_daemon, thr_group.Get(), thread_name.Get(), thread_priority);
  } else {
    InitPeer<false>(peer.Get(), as_daemon, thr_group.Get(), thread_name.Get(), thread_priority);
  }

  return peer.Get();
}

template <bool kTransactionActive>
void Thread::InitPeer(ObjPtr<mirror::Object> peer,
                      bool as_daemon,
                      ObjPtr<mirror::Object> thread_group,
                      ObjPtr<mirror::String> thread_name,
                      jint thread_priority) {
  WellKnownClasses::java_lang_Thread_daemon->SetBoolean<kTransactionActive>(
      peer, static_cast<uint8_t>(as_daemon ? 1u : 0u));
  WellKnownClasses::java_lang_Thread_group->SetObject<kTransactionActive>(peer, thread_group);
  WellKnownClasses::java_lang_Thread_name->SetObject<kTransactionActive>(peer, thread_name);
  WellKnownClasses::java_lang_Thread_priority->SetInt<kTransactionActive>(peer, thread_priority);
}

void Thread::SetCachedThreadName(const char* name) {
  DCHECK(name != kThreadNameDuringStartup);
  const char* old_name = tlsPtr_.name.exchange(name == nullptr ? nullptr : strdup(name));
  if (old_name != nullptr && old_name != kThreadNameDuringStartup) {
    // Deallocate it, carefully. Note that the load has to be ordered wrt the store of the xchg.
    for (uint32_t i = 0; UNLIKELY(tls32_.num_name_readers.load(std::memory_order_seq_cst) != 0);
         ++i) {
      static constexpr uint32_t kNumSpins = 1000;
      // Ugly, but keeps us from having to do anything on the reader side.
      if (i > kNumSpins) {
        usleep(500);
      }
    }
    // We saw the reader count drop to zero since we replaced the name; old one is now safe to
    // deallocate.
    free(const_cast<char*>(old_name));
  }
}

void Thread::SetThreadName(const char* name) {
  SetCachedThreadName(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

static void GetThreadStack(pthread_t thread,
                           void** stack_base,
                           size_t* stack_size,
                           size_t* guard_size) {
#if defined(__APPLE__)
  *stack_size = pthread_get_stacksize_np(thread);
  void* stack_addr = pthread_get_stackaddr_np(thread);

  // Check whether stack_addr is the base or end of the stack.
  // (On Mac OS 10.7, it's the end.)
  int stack_variable;
  if (stack_addr > &stack_variable) {
    *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
  } else {
    *stack_base = stack_addr;
  }

  // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
#else
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);

#if defined(__GLIBC__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == static_cast<uint32_t>(getpid()));
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      size_t old_stack_size = *stack_size;

      // Use the kernel default limit as our size, and adjust the base to match.
      *stack_size = 8 * MB;
      *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);

      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(*stack_size)
                    << " with base " << *stack_base;
    }
  }
#endif
#endif
}

bool Thread::InitStackHwm() {
  ScopedTrace trace("InitStackHwm");
  void* read_stack_base;
  size_t read_stack_size;
  size_t read_guard_size;
  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);

  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
  tlsPtr_.stack_size = read_stack_size;

  // The minimum stack size we can cope with is the overflow reserved bytes (typically
  // 8K) + the protected region size (4K) + another page (4K). Typically this will
  // be 8+4+4 = 16K. The thread won't be able to do much with this stack even the GC takes
  // between 8K and 12K.
  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
      + 4 * KB;
  if (read_stack_size <= min_stack) {
    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
    LogHelper::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ::android::base::ERROR,
                               "Attempt to attach a thread with a too-small stack");
    return false;
  }

  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
                                read_stack_base,
                                PrettySize(read_stack_size).c_str(),
                                PrettySize(read_guard_size).c_str());

  // Set stack_end_ to the bottom of the stack saving space of stack overflows

  Runtime* runtime = Runtime::Current();
  bool implicit_stack_check =
      runtime->GetImplicitStackOverflowChecks() && !runtime->IsAotCompiler();

  ResetDefaultStackEnd();

  // Install the protected region if we are doing implicit overflow checks.
  if (implicit_stack_check) {
    // The thread might have protected region at the bottom. We need
    // to install our own region so we need to move the limits
    // of the stack to make room for it.
    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_size -= read_guard_size + kStackOverflowProtectedSize;

    InstallImplicitProtection();
  }

  // Consistency check.
  CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end));

  return true;
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThreadId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThreadId() << ",tid=" << GetTid() << ',';
  }
  tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
  const char* name = tlsPtr_.name.load();
  os << GetState()
     << ",Thread*=" << this
     << ",peer=" << tlsPtr_.opeer
     << ",\"" << (name == nullptr ? "null" : name) << "\""
     << "]";
  tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
}

Thread::DumpOrder Thread::Dump(std::ostream& os,
                               bool dump_native_stack,
                               bool force_dump_stack) const {
  DumpState(os);
  return DumpStack(os, dump_native_stack, force_dump_stack);
}

Thread::DumpOrder Thread::Dump(std::ostream& os,
                               unwindstack::AndroidLocalUnwinder& unwinder,
                               bool dump_native_stack,
                               bool force_dump_stack) const {
  DumpState(os);
  return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
}

ObjPtr<mirror::String> Thread::GetThreadName() const {
  if (tlsPtr_.opeer == nullptr) {
    return nullptr;
  }
  ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(tlsPtr_.opeer);
  return name == nullptr ? nullptr : name->AsString();
}

void Thread::GetThreadName(std::string& name) const {
  tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
  // The store part of the increment has to be ordered with respect to the following load.
  name.assign(tlsPtr_.name.load(std::memory_order_seq_cst));
  tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
}

uint64_t Thread::GetCpuMicroTime() const {
#if defined(__linux__)
  clockid_t cpu_clock_id;
  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
         static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
#else  // __APPLE__
  UNIMPLEMENTED(WARNING);
  return -1;
#endif
}

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
}

bool Thread::ModifySuspendCountInternal(Thread* self,
                                        int delta,
                                        AtomicInteger* suspend_barrier,
                                        SuspendReason reason) {
  if (kIsDebugBuild) {
    DCHECK(delta == -1 || delta == +1)
        << reason << " " << delta << " " << this;
    Locks::thread_suspend_count_lock_->AssertHeld(self);
    if (this != self && !IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(self);
    }
  }
  // User code suspensions need to be checked more closely since they originate from code outside
  // of the runtime's control.
  if (UNLIKELY(reason == SuspendReason::kForUserCode)) {
    Locks::user_code_suspension_lock_->AssertHeld(self);
    if (UNLIKELY(delta + tls32_.user_code_suspend_count < 0)) {
      LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
      return false;
    }
  }
  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return false;
  }

  if (delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
    // Force retry of a suspend request if it's in the middle of a thread flip to avoid a
    // deadlock. b/31683379.
    return false;
  }

  uint32_t flags = enum_cast<uint32_t>(ThreadFlag::kSuspendRequest);
  if (delta > 0 && suspend_barrier != nullptr) {
    uint32_t available_barrier = kMaxSuspendBarriers;
    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
        available_barrier = i;
        break;
      }
    }
    if (available_barrier == kMaxSuspendBarriers) {
      // No barrier spaces available, we can't add another.
      return false;
    }
    tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
    flags |= enum_cast<uint32_t>(ThreadFlag::kActiveSuspendBarrier);
  }

  tls32_.suspend_count += delta;
  switch (reason) {
    case SuspendReason::kForUserCode:
      tls32_.user_code_suspend_count += delta;
      break;
    case SuspendReason::kInternal:
      break;
  }

  if (tls32_.suspend_count == 0) {
    AtomicClearFlag(ThreadFlag::kSuspendRequest);
  } else {
    // Two bits might be set simultaneously.
    tls32_.state_and_flags.fetch_or(flags, std::memory_order_seq_cst);
    TriggerSuspend();
  }
  return true;
}

bool Thread::PassActiveSuspendBarriers(Thread* self) {
  // Grab the suspend_count lock and copy the current set of
  // barriers. Then clear the list and the flag. The ModifySuspendCount
  // function requires the lock so we prevent a race between setting
  // the kActiveSuspendBarrier flag and clearing it.
  AtomicInteger* pass_barriers[kMaxSuspendBarriers];
  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    if (!ReadFlag(ThreadFlag::kActiveSuspendBarrier)) {
      // quick exit test: the barriers have already been claimed - this is
      // possible as there may be a race to claim and it doesn't matter
      // who wins.
      // All of the callers of this function (except the SuspendAllInternal)
      // will first test the kActiveSuspendBarrier flag without lock. Here
      // double-check whether the barrier has been passed with the
      // suspend_count lock.
      return false;
    }

    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    }
    AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
  }

  uint32_t barrier_count = 0;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
    AtomicInteger* pending_threads = pass_barriers[i];
    if (pending_threads != nullptr) {
      bool done = false;
      do {
        int32_t cur_val = pending_threads->load(std::memory_order_relaxed);
        CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
        // Reduce value by 1.
        done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
#if ART_USE_FUTEXES
        if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
futex(pending_threads->Address(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); } #endif } while (!done); ++barrier_count; } } CHECK_GT(barrier_count, 0U); return true; } void Thread::ClearSuspendBarrier(AtomicInteger* target) { CHECK(ReadFlag(ThreadFlag::kActiveSuspendBarrier)); bool clear_flag = true; for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i]; if (ptr == target) { tlsPtr_.active_suspend_barriers[i] = nullptr; } else if (ptr != nullptr) { clear_flag = false; } } if (LIKELY(clear_flag)) { AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier); } } void Thread::RunCheckpointFunction() { // If this thread is suspended and another thread is running the checkpoint on its behalf, // we may have a pending flip function that we need to run for the sake of those checkpoints // that need to walk the stack. We should not see the flip function flags when the thread // is running the checkpoint on its own. StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed); if (UNLIKELY(state_and_flags.IsAnyOfFlagsSet(FlipFunctionFlags()))) { DCHECK(IsSuspended()); Thread* self = Thread::Current(); DCHECK(self != this); if (state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) { EnsureFlipFunctionStarted(self); state_and_flags = GetStateAndFlags(std::memory_order_relaxed); DCHECK(!state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)); } if (state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) { WaitForFlipFunction(self); } } // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If // there are no more checkpoints we will also clear the kCheckpointRequest flag. Closure* checkpoint; { MutexLock mu(this, *Locks::thread_suspend_count_lock_); checkpoint = tlsPtr_.checkpoint_function; if (!checkpoint_overflow_.empty()) { // Overflow list not empty, copy the first one out and continue. tlsPtr_.checkpoint_function = checkpoint_overflow_.front(); checkpoint_overflow_.pop_front(); } else { // No overflow checkpoints. Clear the kCheckpointRequest flag tlsPtr_.checkpoint_function = nullptr; AtomicClearFlag(ThreadFlag::kCheckpointRequest); } } // Outside the lock, run the checkpoint function. ScopedTrace trace("Run checkpoint function"); CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint"; checkpoint->Run(this); } void Thread::RunEmptyCheckpoint() { // Note: Empty checkpoint does not access the thread's stack, // so we do not need to check for the flip function. DCHECK_EQ(Thread::Current(), this); AtomicClearFlag(ThreadFlag::kEmptyCheckpointRequest); Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this); } bool Thread::RequestCheckpoint(Closure* function) { StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed); if (old_state_and_flags.GetState() != ThreadState::kRunnable) { return false; // Fail, thread is suspended and so can't run a checkpoint. } // We must be runnable to request a checkpoint. DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable); StateAndFlags new_state_and_flags = old_state_and_flags; new_state_and_flags.SetFlag(ThreadFlag::kCheckpointRequest); bool success = tls32_.state_and_flags.CompareAndSetStrongSequentiallyConsistent( old_state_and_flags.GetValue(), new_state_and_flags.GetValue()); if (success) { // Succeeded setting checkpoint flag, now insert the actual checkpoint. 
    if (tlsPtr_.checkpoint_function == nullptr) {
      tlsPtr_.checkpoint_function = function;
    } else {
      checkpoint_overflow_.push_back(function);
    }
    CHECK(ReadFlag(ThreadFlag::kCheckpointRequest));
    TriggerSuspend();
  }
  return success;
}

bool Thread::RequestEmptyCheckpoint() {
  StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
  if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
    // If it's not runnable, we don't need to do anything because it won't be in the middle of a
    // heap access (eg. the read barrier).
    return false;
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
  StateAndFlags new_state_and_flags = old_state_and_flags;
  new_state_and_flags.SetFlag(ThreadFlag::kEmptyCheckpointRequest);
  bool success = tls32_.state_and_flags.CompareAndSetStrongSequentiallyConsistent(
      old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
  if (success) {
    TriggerSuspend();
  }
  return success;
}

class BarrierClosure : public Closure {
 public:
  explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}

  void Run(Thread* self) override {
    wrapped_->Run(self);
    barrier_.Pass(self);
  }

  void Wait(Thread* self, ThreadState suspend_state) {
    if (suspend_state != ThreadState::kRunnable) {
      barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1);
    } else {
      barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1);
    }
  }

 private:
  Closure* wrapped_;
  Barrier barrier_;
};

// RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState suspend_state) {
  Thread* self = Thread::Current();
  if (this == Thread::Current()) {
    Locks::thread_list_lock_->AssertExclusiveHeld(self);
    // Unlock the tll before running so that the state is the same regardless of thread.
    Locks::thread_list_lock_->ExclusiveUnlock(self);
    // Asked to run on this thread. Just run.
    function->Run(this);
    return true;
  }

  // The current thread is not this thread.

  if (GetState() == ThreadState::kTerminated) {
    Locks::thread_list_lock_->ExclusiveUnlock(self);
    return false;
  }

  struct ScopedThreadListLockUnlock {
    explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
        : self_thread(self_in) {
      Locks::thread_list_lock_->AssertHeld(self_thread);
      Locks::thread_list_lock_->Unlock(self_thread);
    }

    ~ScopedThreadListLockUnlock() ACQUIRE(*Locks::thread_list_lock_) {
      Locks::thread_list_lock_->AssertNotHeld(self_thread);
      Locks::thread_list_lock_->Lock(self_thread);
    }

    Thread* self_thread;
  };

  for (;;) {
    Locks::thread_list_lock_->AssertExclusiveHeld(self);
    // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
    // suspend-count lock for too long.
    if (GetState() == ThreadState::kRunnable) {
      BarrierClosure barrier_closure(function);
      bool installed = false;
      {
        MutexLock mu(self, *Locks::thread_suspend_count_lock_);
        installed = RequestCheckpoint(&barrier_closure);
      }
      if (installed) {
        // Relinquish the thread-list lock. We should not wait holding any locks. We cannot
        // reacquire it since we don't know if 'this' hasn't been deleted yet.
        Locks::thread_list_lock_->ExclusiveUnlock(self);
        ScopedThreadStateChange sts(self, suspend_state);
        barrier_closure.Wait(self, suspend_state);
        return true;
      }
      // Fall-through.
    }

    // This thread is not runnable, make sure we stay suspended, then run the checkpoint.
    // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in
    //       certain situations.
{ MutexLock mu2(self, *Locks::thread_suspend_count_lock_); if (!ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal)) { // Just retry the loop. sched_yield(); continue; } } { // Release for the wait. The suspension will keep us from being deleted. Reacquire after so // that we can call ModifySuspendCount without racing against ThreadList::Unregister. ScopedThreadListLockUnlock stllu(self); { ScopedThreadStateChange sts(self, suspend_state); while (GetState() == ThreadState::kRunnable) { // We became runnable again. Wait till the suspend triggered in ModifySuspendCount // moves us to suspended. sched_yield(); } } // Ensure that the flip function for this thread, if pending, is finished *before* // the checkpoint function is run. Otherwise, we may end up with both `to' and 'from' // space references on the stack, confusing the GC's thread-flip logic. The caller is // runnable so can't have a pending flip function. DCHECK_EQ(self->GetState(), ThreadState::kRunnable); DCHECK( !self->GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags())); EnsureFlipFunctionStarted(self); while (GetStateAndFlags(std::memory_order_acquire).IsAnyOfFlagsSet(FlipFunctionFlags())) { sched_yield(); } function->Run(this); } { MutexLock mu2(self, *Locks::thread_suspend_count_lock_); DCHECK_NE(GetState(), ThreadState::kRunnable); bool updated = ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal); DCHECK(updated); } { // Imitate ResumeAll, the thread may be waiting on Thread::resume_cond_ since we raised its // suspend count. Now the suspend_count_ is lowered so we must do the broadcast. MutexLock mu2(self, *Locks::thread_suspend_count_lock_); Thread::resume_cond_->Broadcast(self); } // Release the thread_list_lock_ to be consistent with the barrier-closure path. Locks::thread_list_lock_->ExclusiveUnlock(self); return true; // We're done, break out of the loop. } } void Thread::SetFlipFunction(Closure* function) { // This is called with all threads suspended, except for the calling thread. DCHECK(IsSuspended() || Thread::Current() == this); DCHECK(function != nullptr); DCHECK(tlsPtr_.flip_function == nullptr); tlsPtr_.flip_function = function; DCHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags())); AtomicSetFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_release); } void Thread::EnsureFlipFunctionStarted(Thread* self) { while (true) { StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed); if (!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) { return; } DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)); StateAndFlags new_state_and_flags = old_state_and_flags.WithFlag(ThreadFlag::kRunningFlipFunction) .WithoutFlag(ThreadFlag::kPendingFlipFunction); if (tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(), new_state_and_flags.GetValue())) { RunFlipFunction(self, /*notify=*/ true); DCHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags())); return; } } } void Thread::RunFlipFunction(Thread* self, bool notify) { // This function is called for suspended threads and by the thread running // `ThreadList::FlipThreadRoots()` after we've successfully set the flag // `ThreadFlag::kRunningFlipFunction`. This flag is not set if the thread is // running the flip function right after transitioning to Runnable as // no other thread may run checkpoints on a thread that's actually Runnable. 
  DCHECK_EQ(notify, ReadFlag(ThreadFlag::kRunningFlipFunction));
  Closure* flip_function = tlsPtr_.flip_function;
  tlsPtr_.flip_function = nullptr;
  DCHECK(flip_function != nullptr);
  flip_function->Run(this);

  if (notify) {
    // Clear the `ThreadFlag::kRunningFlipFunction` and `ThreadFlag::kWaitingForFlipFunction`.
    // Check if the latter was actually set, indicating that there is at least one waiting thread.
    constexpr uint32_t kFlagsToClear = enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction) |
                                       enum_cast<uint32_t>(ThreadFlag::kWaitingForFlipFunction);
    StateAndFlags old_state_and_flags(
        tls32_.state_and_flags.fetch_and(~kFlagsToClear, std::memory_order_release));
    if (old_state_and_flags.IsFlagSet(ThreadFlag::kWaitingForFlipFunction)) {
      // Notify all threads that are waiting for completion (at least one).
      // TODO: Should we create a separate mutex and condition variable instead
      // of piggy-backing on the `thread_suspend_count_lock_` and `resume_cond_`?
      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
      resume_cond_->Broadcast(self);
    }
  }
}

void Thread::WaitForFlipFunction(Thread* self) {
  // Another thread is running the flip function. Wait for it to complete.
  // Check the flag while holding the mutex so that we do not miss the broadcast.
  // Repeat the check after waiting to guard against spurious wakeups (and because
  // we share the `thread_suspend_count_lock_` and `resume_cond_` with other code).
  MutexLock mu(self, *Locks::thread_suspend_count_lock_);
  while (true) {
    StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_acquire);
    DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
    if (!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) {
      DCHECK(!old_state_and_flags.IsAnyOfFlagsSet(FlipFunctionFlags()));
      break;
    }
    if (!old_state_and_flags.IsFlagSet(ThreadFlag::kWaitingForFlipFunction)) {
      // Mark that there is a waiting thread.
      StateAndFlags new_state_and_flags =
          old_state_and_flags.WithFlag(ThreadFlag::kWaitingForFlipFunction);
      if (!tls32_.state_and_flags.CompareAndSetWeakRelaxed(old_state_and_flags.GetValue(),
                                                           new_state_and_flags.GetValue())) {
        continue;  // Retry.
      }
    }
    resume_cond_->Wait(self);
  }
}

void Thread::FullSuspendCheck(bool implicit) {
  ScopedTrace trace(__FUNCTION__);
  VLOG(threads) << this << " self-suspending";
  // Make thread appear suspended to other threads, release mutator_lock_.
  // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
  ScopedThreadSuspension(this, ThreadState::kSuspended);  // NOLINT
  if (implicit) {
    // For implicit suspend check we want to `madvise()` away
    // the alternate signal stack to avoid wasting memory.
    MadviseAwayAlternateSignalStack();
  }
  VLOG(threads) << this << " self-reviving";
}

static std::string GetSchedulerGroupName(pid_t tid) {
  // /proc/<pid>/cgroup looks like this:
  // 2:devices:/
  // 1:cpuacct,cpu:/
  // We want the third field from the line whose second field contains the "cpu" token.
  std::string cgroup_file;
  if (!android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid),
                                       &cgroup_file)) {
    return "";
  }
  std::vector<std::string> cgroup_lines;
  Split(cgroup_file, '\n', &cgroup_lines);
  for (size_t i = 0; i < cgroup_lines.size(); ++i) {
    std::vector<std::string> cgroup_fields;
    Split(cgroup_lines[i], ':', &cgroup_fields);
    std::vector<std::string> cgroups;
    Split(cgroup_fields[1], ',', &cgroups);
    for (size_t j = 0; j < cgroups.size(); ++j) {
      if (cgroups[j] == "cpu") {
        return cgroup_fields[2].substr(1);  // Skip the leading slash.
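        // E.g. for a (hypothetical) line "1:cpuacct,cpu:/bg_non_interactive", cgroup_fields is
        // {"1", "cpuacct,cpu", "/bg_non_interactive"} and the returned group name is
        // "bg_non_interactive".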
      }
    }
  }
  return "";
}

void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
  // cause ScopedObjectAccessUnchecked to deadlock.
  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
    ScopedObjectAccessUnchecked soa(self);
    priority = WellKnownClasses::java_lang_Thread_priority->GetInt(thread->tlsPtr_.opeer);
    is_daemon = WellKnownClasses::java_lang_Thread_daemon->GetBoolean(thread->tlsPtr_.opeer);

    ObjPtr<mirror::Object> thread_group =
        WellKnownClasses::java_lang_Thread_group->GetObject(thread->tlsPtr_.opeer);

    if (thread_group != nullptr) {
      ObjPtr<mirror::Object> group_name_object =
          WellKnownClasses::java_lang_ThreadGroup_name->GetObject(thread_group);
      group_name = (group_name_object != nullptr)
                       ? group_name_object->AsString()->ToModifiedUtf8()
                       : "";
    }
  } else if (thread != nullptr) {
    priority = thread->GetNativePriority();
  } else {
    palette_status_t status = PaletteSchedGetPriority(tid, &priority);
    CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != nullptr) {
    thread->tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
    os << '"' << thread->tlsPtr_.name.load() << '"';
    thread->tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThreadId()
       << " " << thread->GetState();
    if (thread->IsStillStarting()) {
      os << " (still starting up)";
    }
    if (thread->tls32_.disable_thread_flip_count != 0) {
      os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
    }
    os << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != nullptr) {
    auto suspend_log_fn = [&]() REQUIRES(Locks::thread_suspend_count_lock_) {
      StateAndFlags state_and_flags = thread->GetStateAndFlags(std::memory_order_relaxed);
      static_assert(
          static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
      state_and_flags.SetState(ThreadState::kRunnable);  // Clear state bits.
      os << " | group=\"" << group_name << "\""
         << " sCount=" << thread->tls32_.suspend_count
         << " ucsCount=" << thread->tls32_.user_code_suspend_count
         << " flags=" << state_and_flags.GetValue()
         << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
         << " self=" << reinterpret_cast<const void*>(thread) << "\n";
    };
    if (Locks::thread_suspend_count_lock_->IsExclusiveHeld(self)) {
      Locks::thread_suspend_count_lock_->AssertExclusiveHeld(self);  // For annotalysis.
      suspend_log_fn();
    } else {
      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
      suspend_log_fn();
    }
  }

  os << " | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, static_cast<id_t>(tid))
     << " cgrp=" << scheduler_group_name;
  if (thread != nullptr) {
    int policy;
    sched_param sp;
#if !defined(__APPLE__)
    // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
policy = sched_getscheduler(tid); if (policy == -1) { PLOG(WARNING) << "sched_getscheduler(" << tid << ")"; } int sched_getparam_result = sched_getparam(tid, &sp); if (sched_getparam_result == -1) { PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)"; sp.sched_priority = -1; } #else CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), __FUNCTION__); #endif os << " sched=" << policy << "/" << sp.sched_priority << " handle=" << reinterpret_cast(thread->tlsPtr_.pthread_self); } os << "\n"; // Grab the scheduler stats for this thread. std::string scheduler_stats; if (android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats) && !scheduler_stats.empty()) { scheduler_stats = android::base::Trim(scheduler_stats); // Lose the trailing '\n'. } else { scheduler_stats = "0 0 0"; } char native_thread_state = '?'; int utime = 0; int stime = 0; int task_cpu = 0; GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu); os << " | state=" << native_thread_state << " schedstat=( " << scheduler_stats << " )" << " utm=" << utime << " stm=" << stime << " core=" << task_cpu << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; if (thread != nullptr) { os << " | stack=" << reinterpret_cast(thread->tlsPtr_.stack_begin) << "-" << reinterpret_cast(thread->tlsPtr_.stack_end) << " stackSize=" << PrettySize(thread->tlsPtr_.stack_size) << "\n"; // Dump the held mutexes. os << " | held mutexes="; for (size_t i = 0; i < kLockLevelCount; ++i) { if (i != kMonitorLock) { BaseMutex* mutex = thread->GetHeldMutex(static_cast(i)); if (mutex != nullptr) { os << " \"" << mutex->GetName() << "\""; if (mutex->IsReaderWriterMutex()) { ReaderWriterMutex* rw_mutex = down_cast(mutex); if (rw_mutex->GetExclusiveOwnerTid() == tid) { os << "(exclusive held)"; } else { os << "(shared held)"; } } } } } os << "\n"; } } void Thread::DumpState(std::ostream& os) const { Thread::DumpState(os, this, GetTid()); } struct StackDumpVisitor : public MonitorObjectsStackVisitor { StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate, bool check_suspended = true, bool dump_locks = true) REQUIRES_SHARED(Locks::mutator_lock_) : MonitorObjectsStackVisitor(thread_in, context, check_suspended, can_allocate && dump_locks), os(os_in), last_method(nullptr), last_line_number(0), repetition_count(0) {} virtual ~StackDumpVisitor() { if (frame_count == 0) { os << " (no managed stack frames)\n"; } } static constexpr size_t kMaxRepetition = 3u; VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) { m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize); ObjPtr dex_cache = m->GetDexCache(); int line_number = -1; uint32_t dex_pc = GetDexPc(false); if (dex_cache != nullptr) { // be tolerant of bad input const DexFile* dex_file = dex_cache->GetDexFile(); line_number = annotations::GetLineNumFromPC(dex_file, m, dex_pc); } if (line_number == last_line_number && last_method == m) { ++repetition_count; } else { if (repetition_count >= kMaxRepetition) { os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; } repetition_count = 0; last_line_number = line_number; last_method = m; } if (repetition_count >= kMaxRepetition) { // Skip visiting=printing anything. 
return VisitMethodResult::kSkipMethod; } os << " at " << m->PrettyMethod(false); if (m->IsNative()) { os << "(Native method)"; } else { const char* source_file(m->GetDeclaringClassSourceFile()); if (line_number == -1) { // If we failed to map to a line number, use // the dex pc as the line number and leave source file null source_file = nullptr; line_number = static_cast(dex_pc); } os << "(" << (source_file != nullptr ? source_file : "unavailable") << ":" << line_number << ")"; } os << "\n"; // Go and visit locks. return VisitMethodResult::kContinueMethod; } VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override { return VisitMethodResult::kContinueMethod; } void VisitWaitingObject(ObjPtr obj, ThreadState state ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) { PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId); } void VisitSleepingObject(ObjPtr obj) override REQUIRES_SHARED(Locks::mutator_lock_) { PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId); } void VisitBlockedOnObject(ObjPtr obj, ThreadState state, uint32_t owner_tid) override REQUIRES_SHARED(Locks::mutator_lock_) { const char* msg; switch (state) { case ThreadState::kBlocked: msg = " - waiting to lock "; break; case ThreadState::kWaitingForLockInflation: msg = " - waiting for lock inflation of "; break; default: LOG(FATAL) << "Unreachable"; UNREACHABLE(); } PrintObject(obj, msg, owner_tid); num_blocked++; } void VisitLockedObject(ObjPtr obj) override REQUIRES_SHARED(Locks::mutator_lock_) { PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId); num_locked++; } void PrintObject(ObjPtr obj, const char* msg, uint32_t owner_tid) REQUIRES_SHARED(Locks::mutator_lock_) { if (obj == nullptr) { os << msg << "an unknown object"; } else { if ((obj->GetLockWord(true).GetState() == LockWord::kThinLocked) && Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) { // Getting the identity hashcode here would result in lock inflation and suspension of the // current thread, which isn't safe if this is the only runnable thread. os << msg << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast(obj.Ptr()), obj->PrettyTypeOf().c_str()); } else { // - waiting on <0x6008c468> (a java.lang.Class) // Call PrettyTypeOf before IdentityHashCode since IdentityHashCode can cause thread // suspension and move pretty_object. const std::string pretty_type(obj->PrettyTypeOf()); os << msg << StringPrintf("<0x%08x> (a %s)", obj->IdentityHashCode(), pretty_type.c_str()); } } if (owner_tid != ThreadList::kInvalidThreadId) { os << " held by thread " << owner_tid; } os << "\n"; } std::ostream& os; ArtMethod* last_method; int last_line_number; size_t repetition_count; size_t num_blocked = 0; size_t num_locked = 0; }; static bool ShouldShowNativeStack(const Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) { ThreadState state = thread->GetState(); // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting. if (state > ThreadState::kWaiting && state < ThreadState::kStarting) { return true; } // In an Object.wait variant or Thread.sleep? That's not interesting. if (state == ThreadState::kTimedWaiting || state == ThreadState::kSleeping || state == ThreadState::kWaiting) { return false; } // Threads with no managed stack frames should be shown. if (!thread->HasManagedStack()) { return true; } // In some other native method? That's interesting. 
// We don't just check kNative because native methods will be in state kSuspended if they're // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the // thread-startup states if it's early enough in their life cycle (http://b/7432159). ArtMethod* current_method = thread->GetCurrentMethod(nullptr); return current_method != nullptr && current_method->IsNative(); } Thread::DumpOrder Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const { // Dumping the Java stack involves the verifier for locks. The verifier operates under the // assumption that there is no exception pending on entry. Thus, stash any pending exception. // Thread::Current() instead of this in case a thread is dumping the stack of another suspended // thread. ScopedExceptionStorage ses(Thread::Current()); std::unique_ptr context(Context::Create()); StackDumpVisitor dumper(os, const_cast(this), context.get(), !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks); dumper.WalkStack(); if (IsJitSensitiveThread()) { return DumpOrder::kMain; } else if (dumper.num_blocked > 0) { return DumpOrder::kBlocked; } else if (dumper.num_locked > 0) { return DumpOrder::kLocked; } else { return DumpOrder::kDefault; } } Thread::DumpOrder Thread::DumpStack(std::ostream& os, bool dump_native_stack, bool force_dump_stack) const { unwindstack::AndroidLocalUnwinder unwinder; return DumpStack(os, unwinder, dump_native_stack, force_dump_stack); } Thread::DumpOrder Thread::DumpStack(std::ostream& os, unwindstack::AndroidLocalUnwinder& unwinder, bool dump_native_stack, bool force_dump_stack) const { // TODO: we call this code when dying but may not have suspended the thread ourself. The // IsSuspended check is therefore racy with the use for dumping (normally we inhibit // the race with the thread_suspend_count_lock_). bool dump_for_abort = (gAborting > 0); bool safe_to_dump = (this == Thread::Current() || IsSuspended()); if (!kIsDebugBuild) { // We always want to dump the stack for an abort, however, there is no point dumping another // thread's stack in debug builds where we'll hit the not suspended check in the stack walk. safe_to_dump = (safe_to_dump || dump_for_abort); } DumpOrder dump_order = DumpOrder::kDefault; if (safe_to_dump || force_dump_stack) { // If we're currently in native code, dump that stack before dumping the managed stack. 
if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) { ArtMethod* method = GetCurrentMethod(nullptr, /*check_suspended=*/ !force_dump_stack, /*abort_on_error=*/ !(dump_for_abort || force_dump_stack)); DumpNativeStack(os, unwinder, GetTid(), " native: ", method); } dump_order = DumpJavaStack(os, /*check_suspended=*/ !force_dump_stack, /*dump_locks=*/ !force_dump_stack); } else { os << "Not able to dump stack of thread that isn't suspended"; } return dump_order; } void Thread::ThreadExitCallback(void* arg) { Thread* self = reinterpret_cast(arg); if (self->tls32_.thread_exit_check_count == 0) { LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's " "going to use a pthread_key_create destructor?): " << *self; CHECK(is_started_); #ifdef __BIONIC__ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self; #else CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); Thread::self_tls_ = self; #endif self->tls32_.thread_exit_check_count = 1; } else { LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; } } void Thread::Startup() { CHECK(!is_started_); is_started_ = true; { // MutexLock to keep annotalysis happy. // // Note we use null for the thread because Thread::Current can // return garbage since (is_started_ == true) and // Thread::pthread_key_self_ is not yet initialized. // This was seen on glibc. MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_); resume_cond_ = new ConditionVariable("Thread resumption condition variable", *Locks::thread_suspend_count_lock_); } // Allocate a TLS slot. CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key"); // Double-check the TLS slot allocation. if (pthread_getspecific(pthread_key_self_) != nullptr) { LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr"; } #ifndef __BIONIC__ CHECK(Thread::self_tls_ == nullptr); #endif } void Thread::FinishStartup() { Runtime* runtime = Runtime::Current(); CHECK(runtime->IsStarted()); // Finish attaching the main thread. ScopedObjectAccess soa(Thread::Current()); soa.Self()->CreatePeer("main", false, runtime->GetMainThreadGroup()); soa.Self()->AssertNoPendingException(); runtime->RunRootClinits(soa.Self()); // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular // threads, this is done in Thread.start() on the Java side. soa.Self()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup()); soa.Self()->AssertNoPendingException(); } void Thread::Shutdown() { CHECK(is_started_); is_started_ = false; CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); if (resume_cond_ != nullptr) { delete resume_cond_; resume_cond_ = nullptr; } } void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) { ObjPtr thread_object = soa.Self()->GetPeer(); ObjPtr thread_group_object = soa.Decode(thread_group); if (thread_group == nullptr || kIsDebugBuild) { // There is always a group set. Retrieve it. 
thread_group_object = WellKnownClasses::java_lang_Thread_group->GetObject(thread_object); if (kIsDebugBuild && thread_group != nullptr) { CHECK(thread_group_object == soa.Decode(thread_group)); } } WellKnownClasses::java_lang_ThreadGroup_add->InvokeVirtual<'V', 'L'>( soa.Self(), thread_group_object, thread_object); } Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), is_runtime_thread_(false) { wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock); wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_); tlsPtr_.mutator_lock = Locks::mutator_lock_; DCHECK(tlsPtr_.mutator_lock != nullptr); tlsPtr_.name.store(kThreadNameDuringStartup, std::memory_order_relaxed); static_assert((sizeof(Thread) % 4) == 0U, "art::Thread has a size which is not a multiple of 4."); DCHECK_EQ(GetStateAndFlags(std::memory_order_relaxed).GetValue(), 0u); StateAndFlags state_and_flags = StateAndFlags(0u).WithState(ThreadState::kNative); tls32_.state_and_flags.store(state_and_flags.GetValue(), std::memory_order_relaxed); tls32_.interrupted.store(false, std::memory_order_relaxed); // Initialize with no permit; if the java Thread was unparked before being // started, it will unpark itself before calling into java code. tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed); memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes)); std::fill(tlsPtr_.rosalloc_runs, tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread, gc::allocator::RosAlloc::GetDedicatedFullRun()); tlsPtr_.checkpoint_function = nullptr; for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { tlsPtr_.active_suspend_barriers[i] = nullptr; } tlsPtr_.flip_function = nullptr; tlsPtr_.thread_local_mark_stack = nullptr; tls32_.is_transitioning_to_runnable = false; ResetTlab(); } bool Thread::CanLoadClasses() const { return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable(); } bool Thread::IsStillStarting() const { // You might think you can check whether the state is kStarting, but for much of thread startup, // the thread is in kNative; it might also be in kVmWait. // You might think you can check whether the peer is null, but the peer is actually created and // assigned fairly early on, and needs to be. // It turns out that the last thing to change is the thread name; that's a good proxy for "has // this thread _ever_ entered kRunnable". return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) || (tlsPtr_.name.load() == kThreadNameDuringStartup); } void Thread::AssertPendingException() const { CHECK(IsExceptionPending()) << "Pending exception expected."; } void Thread::AssertPendingOOMException() const { AssertPendingException(); auto* e = GetException(); CHECK_EQ(e->GetClass(), WellKnownClasses::java_lang_OutOfMemoryError.Get()) << e->Dump(); } void Thread::AssertNoPendingException() const { if (UNLIKELY(IsExceptionPending())) { ScopedObjectAccess soa(Thread::Current()); LOG(FATAL) << "No pending exception expected: " << GetException()->Dump(); } } void Thread::AssertNoPendingExceptionForNewException(const char* msg) const { if (UNLIKELY(IsExceptionPending())) { ScopedObjectAccess soa(Thread::Current()); LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: " << GetException()->Dump(); } } class MonitorExitVisitor : public SingleRootVisitor { public: explicit MonitorExitVisitor(Thread* self) : self_(self) { } // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit. 
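  // Called (via VisitRoots in Thread::Destroy) for each monitor recorded by JNI MonitorEnter.
  // For example (hypothetical scenario), a native thread that called env->MonitorEnter(obj) and
  // then detached without a matching env->MonitorExit(obj) gets the warning below and has the
  // monitor exited on its behalf.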
void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED) override NO_THREAD_SAFETY_ANALYSIS { if (self_->HoldsLock(entered_monitor)) { LOG(WARNING) << "Calling MonitorExit on object " << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")" << " left locked by native thread " << *Thread::Current() << " which is detaching"; entered_monitor->MonitorExit(self_); } } private: Thread* const self_; }; void Thread::Destroy(bool should_run_callbacks) { Thread* self = this; DCHECK_EQ(self, Thread::Current()); if (tlsPtr_.jni_env != nullptr) { { ScopedObjectAccess soa(self); MonitorExitVisitor visitor(self); // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. tlsPtr_.jni_env->monitors_.VisitRoots(&visitor, RootInfo(kRootVMInternal)); } // Release locally held global references which releasing may require the mutator lock. if (tlsPtr_.jpeer != nullptr) { // If pthread_create fails we don't have a jni env here. tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer); tlsPtr_.jpeer = nullptr; } if (tlsPtr_.class_loader_override != nullptr) { tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override); tlsPtr_.class_loader_override = nullptr; } } if (tlsPtr_.opeer != nullptr) { ScopedObjectAccess soa(self); if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) { Trace::FlushThreadBuffer(self); self->ResetMethodTraceBuffer(); } // We may need to call user-supplied managed code, do this before final clean-up. HandleUncaughtExceptions(); RemoveFromThreadGroup(); Runtime* runtime = Runtime::Current(); if (runtime != nullptr && should_run_callbacks) { runtime->GetRuntimeCallbacks()->ThreadDeath(self); } // this.nativePeer = 0; SetNativePeer(tlsPtr_.opeer, nullptr); // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone // who is waiting. ObjPtr lock = WellKnownClasses::java_lang_Thread_lock->GetObject(tlsPtr_.opeer); // (This conditional is only needed for tests, where Thread.lock won't have been set.) if (lock != nullptr) { StackHandleScope<1> hs(self); Handle h_obj(hs.NewHandle(lock)); ObjectLock locker(self, h_obj); locker.NotifyAll(); } tlsPtr_.opeer = nullptr; } { ScopedObjectAccess soa(self); Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this); } // Mark-stack revocation must be performed at the very end. No // checkpoint/flip-function or read-barrier should be called after this. if (gUseReadBarrier) { Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this); } } Thread::~Thread() { CHECK(tlsPtr_.class_loader_override == nullptr); CHECK(tlsPtr_.jpeer == nullptr); CHECK(tlsPtr_.opeer == nullptr); bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run? if (initialized) { delete tlsPtr_.jni_env; tlsPtr_.jni_env = nullptr; } CHECK_NE(GetState(), ThreadState::kRunnable); CHECK(!ReadFlag(ThreadFlag::kCheckpointRequest)); CHECK(!ReadFlag(ThreadFlag::kEmptyCheckpointRequest)); CHECK(tlsPtr_.checkpoint_function == nullptr); CHECK_EQ(checkpoint_overflow_.size(), 0u); CHECK(tlsPtr_.flip_function == nullptr); CHECK_EQ(tls32_.is_transitioning_to_runnable, false); // Make sure we processed all deoptimization requests. CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization"; CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) << "Not all deoptimized frames have been consumed by the debugger."; // We may be deleting a still born thread. 
  SetStateUnsafe(ThreadState::kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  if (tlsPtr_.long_jump_context != nullptr) {
    delete tlsPtr_.long_jump_context;
  }

  if (initialized) {
    CleanupCpu();
  }

  SetCachedThreadName(nullptr);  // Deallocate name.
  delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;

  if (tlsPtr_.method_trace_buffer != nullptr) {
    delete[] tlsPtr_.method_trace_buffer;
  }

  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);

  TearDownAlternateSignalStack();
}

void Thread::HandleUncaughtExceptions() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  if (!self->IsExceptionPending()) {
    return;
  }

  // Get and clear the exception.
  ObjPtr<mirror::Throwable> exception = self->GetException();
  self->ClearException();

  // Call the Thread instance's dispatchUncaughtException(Throwable).
  WellKnownClasses::java_lang_Thread_dispatchUncaughtException->InvokeFinal<'V', 'L'>(
      self, tlsPtr_.opeer, exception);

  // If the dispatchUncaughtException threw, clear that exception too.
  self->ClearException();
}

void Thread::RemoveFromThreadGroup() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  // this.group.threadTerminated(this);
  // group can be null if we're in the compiler or a test.
  ObjPtr<mirror::Object> group =
      WellKnownClasses::java_lang_Thread_group->GetObject(tlsPtr_.opeer);
  if (group != nullptr) {
    WellKnownClasses::java_lang_ThreadGroup_threadTerminated->InvokeVirtual<'V', 'L'>(
        self, group, tlsPtr_.opeer);
  }
}

template <bool kPointsToStack>
class JniTransitionReferenceVisitor : public StackVisitor {
 public:
  JniTransitionReferenceVisitor(Thread* thread, void* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, /*context=*/ nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        obj_(obj),
        found_(false) {}

  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (!m->IsNative() || m->IsCriticalNative()) {
      return true;
    }
    if (kPointsToStack) {
      uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
      size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      uint32_t* current_vreg = reinterpret_cast<uint32_t*>(sp + frame_size + sizeof(ArtMethod*));
      if (!m->IsStatic()) {
        if (current_vreg == obj_) {
          found_ = true;
          return false;
        }
        current_vreg += 1u;
      }
      uint32_t shorty_length;
      const char* shorty = m->GetShorty(&shorty_length);
      for (size_t i = 1; i != shorty_length; ++i) {
        switch (shorty[i]) {
          case 'D':
          case 'J':
            current_vreg += 2u;
            break;
          case 'L':
            if (current_vreg == obj_) {
              found_ = true;
              return false;
            }
            FALLTHROUGH_INTENDED;
          default:
            current_vreg += 1u;
            break;
        }
      }
      // Continue only if the object is somewhere higher on the stack.
      return obj_ >= current_vreg;
    } else {  // if (kPointsToStack)
      if (m->IsStatic() && obj_ == m->GetDeclaringClassAddressWithoutBarrier()) {
        found_ = true;
        return false;
      }
      return true;
    }
  }

  bool Found() const {
    return found_;
  }

 private:
  void* obj_;
  bool found_;
};

bool Thread::IsJniTransitionReference(jobject obj) const {
  DCHECK(obj != nullptr);
  // We need a non-const pointer for stack walk even if we're not modifying the thread state.
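  // The two instantiations below select the visitor's mode: <true> when the candidate pointer
  // lies within this thread's stack (the visitor then scans the native method's stack-passed
  // argument slots), <false> otherwise (the visitor then only matches the declaring-class slot
  // used for static native methods).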
  Thread* thread = const_cast<Thread*>(this);
  uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
  if (static_cast<size_t>(raw_obj - tlsPtr_.stack_begin) < tlsPtr_.stack_size) {
    JniTransitionReferenceVisitor</*kPointsToStack=*/ true> visitor(thread, raw_obj);
    visitor.WalkStack();
    return visitor.Found();
  } else {
    JniTransitionReferenceVisitor</*kPointsToStack=*/ false> visitor(thread, raw_obj);
    visitor.WalkStack();
    return visitor.Found();
  }
}

void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootNativeStack, thread_id));
  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
    cur->VisitRoots(buffered_visitor);
  }
}

ObjPtr<mirror::Object> Thread::DecodeGlobalJObject(jobject obj) const {
  DCHECK(obj != nullptr);
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
  DCHECK_NE(kind, kJniTransition);
  DCHECK_NE(kind, kLocal);
  ObjPtr<mirror::Object> result;
  bool expect_null = false;
  if (kind == kGlobal) {
    result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
  } else {
    DCHECK_EQ(kind, kWeakGlobal);
    result = tlsPtr_.jni_env->vm_->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
      // This is a special case where it's okay to return null.
      expect_null = true;
      result = nullptr;
    }
  }

  DCHECK(expect_null || result != nullptr)
      << "use of deleted " << ToStr(kind).c_str() << " " << static_cast<const void*>(obj);
  return result;
}

bool Thread::IsJWeakCleared(jweak obj) const {
  CHECK(obj != nullptr);
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
  CHECK_EQ(kind, kWeakGlobal);
  return tlsPtr_.jni_env->vm_->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
}

// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
  DCHECK_EQ(Thread::Current(), this);
  // No other thread can concurrently reset the interrupted flag.
  bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst);
  if (interrupted) {
    tls32_.interrupted.store(false, std::memory_order_seq_cst);
  }
  return interrupted;
}

// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
  return tls32_.interrupted.load(std::memory_order_seq_cst);
}

void Thread::Interrupt(Thread* self) {
  {
    MutexLock mu(self, *wait_mutex_);
    if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
      return;
    }
    tls32_.interrupted.store(true, std::memory_order_seq_cst);
    NotifyLocked(self);
  }
  Unpark();
}

void Thread::Notify() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  NotifyLocked(self);
}

void Thread::NotifyLocked(Thread* self) {
  if (wait_monitor_ != nullptr) {
    wait_cond_->Signal(self);
  }
}

void Thread::SetClassLoaderOverride(jobject class_loader_override) {
  if (tlsPtr_.class_loader_override != nullptr) {
    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
  }
  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
}

using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>;

// Counts the stack trace depth and also fetches the first max_saved_frames frames.
class FetchStackTraceVisitor : public StackVisitor {
 public:
  explicit FetchStackTraceVisitor(Thread* thread,
                                  ArtMethodDexPcPair* saved_frames = nullptr,
                                  size_t max_saved_frames = 0)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        saved_frames_(saved_frames),
        max_saved_frames_(max_saved_frames) {}

  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
// Note we also skip the frame if it doesn't have a method (namely the callee // save frame) ArtMethod* m = GetMethod(); if (skipping_ && !m->IsRuntimeMethod() && !GetClassRoot()->IsAssignableFrom(m->GetDeclaringClass())) { skipping_ = false; } if (!skipping_) { if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). if (depth_ < max_saved_frames_) { saved_frames_[depth_].first = m; saved_frames_[depth_].second = m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc(); } ++depth_; } } else { ++skip_depth_; } return true; } uint32_t GetDepth() const { return depth_; } uint32_t GetSkipDepth() const { return skip_depth_; } private: uint32_t depth_ = 0; uint32_t skip_depth_ = 0; bool skipping_ = true; ArtMethodDexPcPair* saved_frames_; const size_t max_saved_frames_; DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); }; class BuildInternalStackTraceVisitor : public StackVisitor { public: BuildInternalStackTraceVisitor(Thread* self, Thread* thread, uint32_t skip_depth) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), skip_depth_(skip_depth), pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} bool Init(uint32_t depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { // Allocate method trace as an object array where the first element is a pointer array that // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring // class of the ArtMethod pointers. ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); StackHandleScope<1> hs(self_); ObjPtr array_class = GetClassRoot>(class_linker); // The first element is the methods and dex pc array, the other elements are declaring classes // for the methods to ensure classes in the stack trace don't get unloaded. Handle> trace( hs.NewHandle(mirror::ObjectArray::Alloc( hs.Self(), array_class, static_cast(depth) + 1))); if (trace == nullptr) { // Acquire uninterruptible_ in all paths. self_->StartAssertNoThreadSuspension("Building internal stack trace"); self_->AssertPendingOOMException(); return false; } ObjPtr methods_and_pcs = class_linker->AllocPointerArray(self_, depth * 2); const char* last_no_suspend_cause = self_->StartAssertNoThreadSuspension("Building internal stack trace"); if (methods_and_pcs == nullptr) { self_->AssertPendingOOMException(); return false; } trace->Set(0, methods_and_pcs); trace_ = trace.Get(); // If We are called from native, use non-transactional mode. CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; return true; } virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { self_->EndAssertNoThreadSuspension(nullptr); } bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) { if (trace_ == nullptr) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. } if (skip_depth_ > 0) { skip_depth_--; return true; } ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; // Ignore runtime frames (in particular callee save). } AddFrame(m, m->IsProxyMethod() ? 
dex::kDexNoIndex : GetDexPc()); return true; } void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr methods_and_pcs = GetTraceMethodsAndPCs(); methods_and_pcs->SetElementPtrSize( count_, method, pointer_size_); methods_and_pcs->SetElementPtrSize( static_cast(methods_and_pcs->GetLength()) / 2 + count_, dex_pc, pointer_size_); // Save the declaring class of the method to ensure that the declaring classes of the methods // do not get unloaded while the stack trace is live. However, this does not work for copied // methods because the declaring class of a copied method points to an interface class which // may be in a different class loader. Instead, retrieve the class loader associated with the // allocator that holds the copied method. This is much cheaper than finding the actual class. ObjPtr keep_alive; if (UNLIKELY(method->IsCopied())) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); keep_alive = class_linker->GetHoldingClassLoaderOfCopiedMethod(self_, method); } else { keep_alive = method->GetDeclaringClass(); } trace_->Set( static_cast(count_) + 1, keep_alive); ++count_; } ObjPtr GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { return ObjPtr::DownCast(trace_->Get(0)); } mirror::ObjectArray* GetInternalStackTrace() const { return trace_; } private: Thread* const self_; // How many more frames to skip. uint32_t skip_depth_; // Current position down stack trace. uint32_t count_ = 0; // An object array where the first element is a pointer array that contains the `ArtMethod` // pointers on the stack and dex PCs. The rest of the elements are referencing objects // that shall keep the methods alive, namely the declaring class of the `ArtMethod` for // declared methods and the class loader for copied methods (because it's faster to find // the class loader than the actual class that holds the copied method). The `trace_[i+1]` // contains the declaring class or class loader of the `ArtMethod` of the i'th frame. // We're initializing a newly allocated trace, so we do not need to record that under // a transaction. If the transaction is aborted, the whole trace shall be unreachable. mirror::ObjectArray* trace_ = nullptr; // For cross compilation. const PointerSize pointer_size_; DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor); }; jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { // Compute depth of stack, save frames if possible to avoid needing to recompute many. constexpr size_t kMaxSavedFrames = 256; std::unique_ptr saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]); FetchStackTraceVisitor count_visitor(const_cast(this), &saved_frames[0], kMaxSavedFrames); count_visitor.WalkStack(); const uint32_t depth = count_visitor.GetDepth(); const uint32_t skip_depth = count_visitor.GetSkipDepth(); // Build internal stack trace. BuildInternalStackTraceVisitor build_trace_visitor( soa.Self(), const_cast(this), skip_depth); if (!build_trace_visitor.Init(depth)) { return nullptr; // Allocation failed. } // If we saved all of the frames we don't even need to do the actual stack walk. This is faster // than doing the stack walk twice. 
if (depth < kMaxSavedFrames) { for (size_t i = 0; i < depth; ++i) { build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second); } } else { build_trace_visitor.WalkStack(); } mirror::ObjectArray* trace = build_trace_visitor.GetInternalStackTrace(); if (kIsDebugBuild) { ObjPtr trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); // Second half of trace_methods is dex PCs. for (uint32_t i = 0; i < static_cast(trace_methods->GetLength() / 2); ++i) { auto* method = trace_methods->GetElementPtrSize( i, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); CHECK(method != nullptr); } } return soa.AddLocalReference(trace); } bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr exception) const { // Only count the depth since we do not pass a stack frame array as an argument. FetchStackTraceVisitor count_visitor(const_cast(this)); count_visitor.WalkStack(); return count_visitor.GetDepth() == static_cast(exception->GetStackDepth()); } static ObjPtr CreateStackTraceElement( const ScopedObjectAccessAlreadyRunnable& soa, ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { int32_t line_number; StackHandleScope<3> hs(soa.Self()); auto class_name_object(hs.NewHandle(nullptr)); auto source_name_object(hs.NewHandle(nullptr)); if (method->IsProxyMethod()) { line_number = -1; class_name_object.Assign(method->GetDeclaringClass()->GetName()); // source_name_object intentionally left null for proxy methods } else { line_number = method->GetLineNumFromDexPC(dex_pc); // Allocate element, potentially triggering GC // TODO: reuse class_name_object via Class::name_? const char* descriptor = method->GetDeclaringClassDescriptor(); CHECK(descriptor != nullptr); std::string class_name(PrettyDescriptor(descriptor)); class_name_object.Assign( mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); if (class_name_object == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } const char* source_file = method->GetDeclaringClassSourceFile(); if (line_number == -1) { // Make the line_number field of StackTraceElement hold the dex pc. // source_name_object is intentionally left null if we failed to map the dex pc to // a line number (most probably because there is no debug info). See b/30183883. line_number = static_cast(dex_pc); } else { if (source_file != nullptr) { source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); if (source_name_object == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } } } } const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName(); CHECK(method_name != nullptr); Handle method_name_object( hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); if (method_name_object == nullptr) { return nullptr; } return mirror::StackTraceElement::Alloc(soa.Self(), class_name_object, method_name_object, source_name_object, line_number); } jobjectArray Thread::InternalStackTraceToStackTraceElementArray( const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array, int* stack_depth) { // Decode the internal stack trace into the depth, method trace and PC trace. // Subtract one for the methods and PC trace. int32_t depth = soa.Decode(internal)->GetLength() - 1; DCHECK_GE(depth, 0); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); jobjectArray result; if (output_array != nullptr) { // Reuse the array we were given. 
result = output_array; // ...adjusting the number of frames we'll write to not exceed the array length. const int32_t traces_length = soa.Decode>(result)->GetLength(); depth = std::min(depth, traces_length); } else { // Create java_trace array and place in local reference table ObjPtr> java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), static_cast(depth)); if (java_traces == nullptr) { return nullptr; } result = soa.AddLocalReference(java_traces); } if (stack_depth != nullptr) { *stack_depth = depth; } for (uint32_t i = 0; i < static_cast(depth); ++i) { ObjPtr> decoded_traces = soa.Decode(internal)->AsObjectArray(); // Methods and dex PC trace is element 0. DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); const ObjPtr method_trace = ObjPtr::DownCast(decoded_traces->Get(0)); // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) ArtMethod* method = method_trace->GetElementPtrSize(i, kRuntimePointerSize); uint32_t dex_pc = method_trace->GetElementPtrSize( i + static_cast(method_trace->GetLength()) / 2, kRuntimePointerSize); const ObjPtr obj = CreateStackTraceElement(soa, method, dex_pc); if (obj == nullptr) { return nullptr; } // We are called from native: use non-transactional mode. soa.Decode>(result)->Set( static_cast(i), obj); } return result; } [[nodiscard]] static ObjPtr InitStackFrameInfo( const ScopedObjectAccessAlreadyRunnable& soa, ClassLinker* class_linker, Handle stackFrameInfo, ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { StackHandleScope<4> hs(soa.Self()); int32_t line_number; auto source_name_object(hs.NewHandle(nullptr)); if (method->IsProxyMethod()) { line_number = -1; // source_name_object intentionally left null for proxy methods } else { line_number = method->GetLineNumFromDexPC(dex_pc); if (line_number == -1) { // Make the line_number field of StackFrameInfo hold the dex pc. // source_name_object is intentionally left null if we failed to map the dex pc to // a line number (most probably because there is no debug info). See b/30183883. 
line_number = static_cast(dex_pc); } else { const char* source_file = method->GetDeclaringClassSourceFile(); if (source_file != nullptr) { source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); if (source_name_object == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } } } } Handle declaring_class_object( hs.NewHandle(method->GetDeclaringClass())); ArtMethod* interface_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); const char* method_name = interface_method->GetName(); CHECK(method_name != nullptr); Handle method_name_object( hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); if (method_name_object == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } dex::ProtoIndex proto_idx = method->GetDexFile()->GetIndexForProtoId(interface_method->GetPrototype()); Handle method_type_object(hs.NewHandle( class_linker->ResolveMethodType(soa.Self(), proto_idx, interface_method))); if (method_type_object == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } stackFrameInfo->AssignFields(declaring_class_object, method_type_object, method_name_object, source_name_object, line_number, static_cast(dex_pc)); return stackFrameInfo.Get(); } constexpr jlong FILL_CLASS_REFS_ONLY = 0x2; // StackStreamFactory.FILL_CLASS_REFS_ONLY jint Thread::InternalStackTraceToStackFrameInfoArray( const ScopedObjectAccessAlreadyRunnable& soa, jlong mode, // See java.lang.StackStreamFactory for the mode flags jobject internal, jint startLevel, jint batchSize, jint startBufferIndex, jobjectArray output_array) { // Decode the internal stack trace into the depth, method trace and PC trace. // Subtract one for the methods and PC trace. int32_t depth = soa.Decode(internal)->GetLength() - 1; DCHECK_GE(depth, 0); StackHandleScope<6> hs(soa.Self()); Handle> framesOrClasses = hs.NewHandle(soa.Decode>(output_array)); jint endBufferIndex = startBufferIndex; if (startLevel < 0 || startLevel >= depth) { return endBufferIndex; } int32_t bufferSize = framesOrClasses->GetLength(); if (startBufferIndex < 0 || startBufferIndex >= bufferSize) { return endBufferIndex; } // The FILL_CLASS_REFS_ONLY flag is defined in AbstractStackWalker.fetchStackFrames() javadoc. bool isClassArray = (mode & FILL_CLASS_REFS_ONLY) != 0; Handle> decoded_traces = hs.NewHandle(soa.Decode(internal)->AsObjectArray()); // Methods and dex PC trace is element 0. 
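  // Layout reminder (see BuildInternalStackTraceVisitor above): element 0 is a PointerArray whose
  // first half holds the ArtMethod* entries and whose second half holds the corresponding dex pcs;
  // the remaining elements hold objects that keep the declaring classes (or the class loaders of
  // copied methods) alive.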
DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); Handle method_trace = hs.NewHandle(ObjPtr::DownCast(decoded_traces->Get(0))); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); Handle sfi_class = hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/StackFrameInfo;")); DCHECK(sfi_class != nullptr); MutableHandle frame = hs.NewHandle(nullptr); MutableHandle clazz = hs.NewHandle(nullptr); for (uint32_t i = static_cast(startLevel); i < static_cast(depth); ++i) { if (endBufferIndex >= startBufferIndex + batchSize || endBufferIndex >= bufferSize) { break; } ArtMethod* method = method_trace->GetElementPtrSize(i, kRuntimePointerSize); if (isClassArray) { clazz.Assign(method->GetDeclaringClass()); framesOrClasses->Set(endBufferIndex, clazz.Get()); } else { // Prepare parameters for fields in StackFrameInfo uint32_t dex_pc = method_trace->GetElementPtrSize( i + static_cast(method_trace->GetLength()) / 2, kRuntimePointerSize); ObjPtr frameObject = framesOrClasses->Get(endBufferIndex); // If libcore didn't allocate the object, we just stop here, but it's unlikely. if (frameObject == nullptr || !frameObject->InstanceOf(sfi_class.Get())) { break; } frame.Assign(ObjPtr::DownCast(frameObject)); frame.Assign(InitStackFrameInfo(soa, class_linker, frame, method, dex_pc)); // Break if InitStackFrameInfo fails to allocate objects or assign the fields. if (frame == nullptr) { break; } } ++endBufferIndex; } return endBufferIndex; } jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { // This code allocates. Do not allow it to operate with a pending exception. if (IsExceptionPending()) { return nullptr; } class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor { public: CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in, Thread* self, Context* context) : MonitorObjectsStackVisitor(self, context), wait_jobject_(soaa_in.Env(), nullptr), block_jobject_(soaa_in.Env(), nullptr), soaa_(soaa_in) {} protected: VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr obj = CreateStackTraceElement( soaa_, m, GetDexPc(/* abort on error */ false)); if (obj == nullptr) { return VisitMethodResult::kEndStackWalk; } stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference(obj.Ptr())); return VisitMethodResult::kContinueMethod; } VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override { lock_objects_.push_back({}); lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_); DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size()); return VisitMethodResult::kContinueMethod; } void VisitWaitingObject(ObjPtr obj, ThreadState state ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) { wait_jobject_.reset(soaa_.AddLocalReference(obj)); } void VisitSleepingObject(ObjPtr obj) override REQUIRES_SHARED(Locks::mutator_lock_) { wait_jobject_.reset(soaa_.AddLocalReference(obj)); } void VisitBlockedOnObject(ObjPtr obj, ThreadState state ATTRIBUTE_UNUSED, uint32_t owner_tid ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) { block_jobject_.reset(soaa_.AddLocalReference(obj)); } void VisitLockedObject(ObjPtr obj) override REQUIRES_SHARED(Locks::mutator_lock_) { frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference(obj)); } public: std::vector> stack_trace_elements_; ScopedLocalRef wait_jobject_; ScopedLocalRef 
block_jobject_; std::vector>> lock_objects_; private: const ScopedObjectAccessAlreadyRunnable& soaa_; std::vector> frame_lock_objects_; }; std::unique_ptr context(Context::Create()); CollectFramesAndLocksStackVisitor dumper(soa, const_cast(this), context.get()); dumper.WalkStack(); // There should not be a pending exception. Otherwise, return with it pending. if (IsExceptionPending()) { return nullptr; } // Now go and create Java arrays. ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); StackHandleScope<6> hs(soa.Self()); Handle h_aste_array_class = hs.NewHandle(class_linker->FindSystemClass( soa.Self(), "[Ldalvik/system/AnnotatedStackTraceElement;")); if (h_aste_array_class == nullptr) { return nullptr; } Handle h_aste_class = hs.NewHandle(h_aste_array_class->GetComponentType()); Handle h_o_array_class = hs.NewHandle(GetClassRoot>(class_linker)); DCHECK(h_o_array_class != nullptr); // Class roots must be already initialized. // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 . class_linker->EnsureInitialized(soa.Self(), h_aste_class, /* can_init_fields= */ true, /* can_init_parents= */ true); if (soa.Self()->IsExceptionPending()) { // This should not fail in a healthy runtime. return nullptr; } ArtField* stack_trace_element_field = h_aste_class->FindDeclaredInstanceField("stackTraceElement", "Ljava/lang/StackTraceElement;"); DCHECK(stack_trace_element_field != nullptr); ArtField* held_locks_field = h_aste_class->FindDeclaredInstanceField("heldLocks", "[Ljava/lang/Object;"); DCHECK(held_locks_field != nullptr); ArtField* blocked_on_field = h_aste_class->FindDeclaredInstanceField("blockedOn", "Ljava/lang/Object;"); DCHECK(blocked_on_field != nullptr); int32_t length = static_cast(dumper.stack_trace_elements_.size()); ObjPtr> array = mirror::ObjectArray::Alloc(soa.Self(), h_aste_array_class.Get(), length); if (array == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } ScopedLocalRef result(soa.Env(), soa.Env()->AddLocalReference(array)); MutableHandle handle(hs.NewHandle(nullptr)); MutableHandle> handle2( hs.NewHandle>(nullptr)); for (size_t i = 0; i != static_cast(length); ++i) { handle.Assign(h_aste_class->AllocObject(soa.Self())); if (handle == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } // Set stack trace element. stack_trace_element_field->SetObject( handle.Get(), soa.Decode(dumper.stack_trace_elements_[i].get())); // Create locked-on array. if (!dumper.lock_objects_[i].empty()) { handle2.Assign(mirror::ObjectArray::Alloc( soa.Self(), h_o_array_class.Get(), static_cast(dumper.lock_objects_[i].size()))); if (handle2 == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; } int32_t j = 0; for (auto& scoped_local : dumper.lock_objects_[i]) { if (scoped_local == nullptr) { continue; } handle2->Set(j, soa.Decode(scoped_local.get())); DCHECK(!soa.Self()->IsExceptionPending()); j++; } held_locks_field->SetObject(handle.Get(), handle2.Get()); } // Set blocked-on object. if (i == 0) { if (dumper.block_jobject_ != nullptr) { blocked_on_field->SetObject( handle.Get(), soa.Decode(dumper.block_jobject_.get())); } } ScopedLocalRef elem(soa.Env(), soa.AddLocalReference(handle.Get())); soa.Env()->SetObjectArrayElement(result.get(), static_cast(i), elem.get()); DCHECK(!soa.Self()->IsExceptionPending()); } return result.release(); } void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) 
{ va_list args; va_start(args, fmt); ThrowNewExceptionV(exception_class_descriptor, fmt, args); va_end(args); } void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) { std::string msg; StringAppendV(&msg, fmt, ap); ThrowNewException(exception_class_descriptor, msg.c_str()); } void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) { // Callers should either clear or call ThrowNewWrappedException. AssertNoPendingExceptionForNewException(msg); ThrowNewWrappedException(exception_class_descriptor, msg); } static ObjPtr GetCurrentClassLoader(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod* method = self->GetCurrentMethod(nullptr); return method != nullptr ? method->GetDeclaringClass()->GetClassLoader() : nullptr; } void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) { DCHECK_EQ(this, Thread::Current()); ScopedObjectAccessUnchecked soa(this); StackHandleScope<3> hs(soa.Self()); // Disable public sdk checks if we need to throw exceptions. // The checks are only used in AOT compilation and may block (exception) class // initialization if it needs access to private fields (e.g. serialVersionUID). // // Since throwing an exception will EnsureInitialization and the public sdk may // block that, disable the checks. It's ok to do so, because the thrown exceptions // are not part of the application code that needs to verified. ScopedDisablePublicSdkChecker sdpsc; Handle class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self()))); ScopedLocalRef cause(GetJniEnv(), soa.AddLocalReference(GetException())); ClearException(); Runtime* runtime = Runtime::Current(); auto* cl = runtime->GetClassLinker(); Handle exception_class( hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader))); if (UNLIKELY(exception_class == nullptr)) { CHECK(IsExceptionPending()); LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); return; } if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true, true))) { DCHECK(IsExceptionPending()); return; } DCHECK_IMPLIES(runtime->IsStarted(), exception_class->IsThrowableClass()); Handle exception( hs.NewHandle(ObjPtr::DownCast(exception_class->AllocObject(this)))); // If we couldn't allocate the exception, throw the pre-allocated out of memory exception. if (exception == nullptr) { Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingException()); return; } // Choose an appropriate constructor and set up the arguments. const char* signature; ScopedLocalRef msg_string(GetJniEnv(), nullptr); if (msg != nullptr) { // Ensure we remember this and the method over the String allocation. msg_string.reset( soa.AddLocalReference(mirror::String::AllocFromModifiedUtf8(this, msg))); if (UNLIKELY(msg_string.get() == nullptr)) { CHECK(IsExceptionPending()); // OOME. 
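      // Allocating the detail message failed. The failed allocation already left an error
      // (typically an OutOfMemoryError) pending on this thread, so we keep that one and
      // give up on constructing the originally requested exception.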
return; } if (cause.get() == nullptr) { signature = "(Ljava/lang/String;)V"; } else { signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; } } else { if (cause.get() == nullptr) { signature = "()V"; } else { signature = "(Ljava/lang/Throwable;)V"; } } ArtMethod* exception_init_method = exception_class->FindConstructor(signature, cl->GetImagePointerSize()); CHECK(exception_init_method != nullptr) << "No " << signature << " in " << PrettyDescriptor(exception_class_descriptor); if (UNLIKELY(!runtime->IsStarted())) { // Something is trying to throw an exception without a started runtime, which is the common // case in the compiler. We won't be able to invoke the constructor of the exception, so set // the exception fields directly. if (msg != nullptr) { exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString()); } if (cause.get() != nullptr) { exception->SetCause(DecodeJObject(cause.get())->AsThrowable()); } ScopedLocalRef trace(GetJniEnv(), CreateInternalStackTrace(soa)); if (trace.get() != nullptr) { exception->SetStackState(DecodeJObject(trace.get()).Ptr()); } SetException(exception.Get()); } else { jvalue jv_args[2]; size_t i = 0; if (msg != nullptr) { jv_args[i].l = msg_string.get(); ++i; } if (cause.get() != nullptr) { jv_args[i].l = cause.get(); ++i; } ScopedLocalRef ref(soa.Env(), soa.AddLocalReference(exception.Get())); InvokeWithJValues(soa, ref.get(), exception_init_method, jv_args); if (LIKELY(!IsExceptionPending())) { SetException(exception.Get()); } } } void Thread::ThrowOutOfMemoryError(const char* msg) { LOG(WARNING) << "Throwing OutOfMemoryError " << '"' << msg << '"' << " (VmSize " << GetProcessStatus("VmSize") << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")"); ScopedTrace trace("OutOfMemoryError"); if (!tls32_.throwing_OutOfMemoryError) { tls32_.throwing_OutOfMemoryError = true; ThrowNewException("Ljava/lang/OutOfMemoryError;", msg); tls32_.throwing_OutOfMemoryError = false; } else { Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()); } } Thread* Thread::CurrentFromGdb() { return Thread::Current(); } void Thread::DumpFromGdb() const { std::ostringstream ss; Dump(ss); std::string str(ss.str()); // log to stderr for debugging command line processes std::cerr << str; #ifdef ART_TARGET_ANDROID // log to logcat for debugging frameworks processes LOG(INFO) << str; #endif } // Explicitly instantiate 32 and 64bit thread offset dumping support. 
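// DumpThreadOffset(os, offset) translates a byte offset into the Thread object into a
// symbolic name: first the named Thread fields, then the JNI and quick entrypoint tables,
// falling back to printing the raw numeric offset. It is instantiated for both
// PointerSize::k32 and PointerSize::k64 so the lookup works for either pointer size
// (used, e.g., when annotating thread-relative memory accesses in disassembly).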
template void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset); template void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset); template void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { #define DO_THREAD_OFFSET(x, y) \ if (offset == (x).Uint32Value()) { \ os << (y); \ return; \ } DO_THREAD_OFFSET(ThreadFlagsOffset(), "state_and_flags") DO_THREAD_OFFSET(CardTableOffset(), "card_table") DO_THREAD_OFFSET(ExceptionOffset(), "exception") DO_THREAD_OFFSET(PeerOffset(), "peer"); DO_THREAD_OFFSET(JniEnvOffset(), "jni_env") DO_THREAD_OFFSET(SelfOffset(), "self") DO_THREAD_OFFSET(StackEndOffset(), "stack_end") DO_THREAD_OFFSET(ThinLockIdOffset(), "thin_lock_thread_id") DO_THREAD_OFFSET(IsGcMarkingOffset(), "is_gc_marking") DO_THREAD_OFFSET(TopOfManagedStackOffset(), "top_quick_frame_method") DO_THREAD_OFFSET(TopShadowFrameOffset(), "top_shadow_frame") DO_THREAD_OFFSET(TopHandleScopeOffset(), "top_handle_scope") DO_THREAD_OFFSET(ThreadSuspendTriggerOffset(), "suspend_trigger") #undef DO_THREAD_OFFSET #define JNI_ENTRY_POINT_INFO(x) \ if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ os << #x; \ return; \ } JNI_ENTRY_POINT_INFO(pDlsymLookup) JNI_ENTRY_POINT_INFO(pDlsymLookupCritical) #undef JNI_ENTRY_POINT_INFO #define QUICK_ENTRY_POINT_INFO(x) \ if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ os << #x; \ return; \ } QUICK_ENTRY_POINT_INFO(pAllocArrayResolved) QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8) QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16) QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32) QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64) QUICK_ENTRY_POINT_INFO(pAllocObjectResolved) QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized) QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks) QUICK_ENTRY_POINT_INFO(pAllocStringObject) QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes) QUICK_ENTRY_POINT_INFO(pAllocStringFromChars) QUICK_ENTRY_POINT_INFO(pAllocStringFromString) QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial) QUICK_ENTRY_POINT_INFO(pCheckInstanceOf) QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage) QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess) QUICK_ENTRY_POINT_INFO(pResolveType) QUICK_ENTRY_POINT_INFO(pResolveString) QUICK_ENTRY_POINT_INFO(pSet8Instance) QUICK_ENTRY_POINT_INFO(pSet8Static) QUICK_ENTRY_POINT_INFO(pSet16Instance) QUICK_ENTRY_POINT_INFO(pSet16Static) QUICK_ENTRY_POINT_INFO(pSet32Instance) QUICK_ENTRY_POINT_INFO(pSet32Static) QUICK_ENTRY_POINT_INFO(pSet64Instance) QUICK_ENTRY_POINT_INFO(pSet64Static) QUICK_ENTRY_POINT_INFO(pSetObjInstance) QUICK_ENTRY_POINT_INFO(pSetObjStatic) QUICK_ENTRY_POINT_INFO(pGetByteInstance) QUICK_ENTRY_POINT_INFO(pGetBooleanInstance) QUICK_ENTRY_POINT_INFO(pGetByteStatic) QUICK_ENTRY_POINT_INFO(pGetBooleanStatic) QUICK_ENTRY_POINT_INFO(pGetShortInstance) QUICK_ENTRY_POINT_INFO(pGetCharInstance) QUICK_ENTRY_POINT_INFO(pGetShortStatic) QUICK_ENTRY_POINT_INFO(pGetCharStatic) QUICK_ENTRY_POINT_INFO(pGet32Instance) QUICK_ENTRY_POINT_INFO(pGet32Static) QUICK_ENTRY_POINT_INFO(pGet64Instance) QUICK_ENTRY_POINT_INFO(pGet64Static) QUICK_ENTRY_POINT_INFO(pGetObjInstance) QUICK_ENTRY_POINT_INFO(pGetObjStatic) QUICK_ENTRY_POINT_INFO(pAputObject) QUICK_ENTRY_POINT_INFO(pJniMethodStart) QUICK_ENTRY_POINT_INFO(pJniMethodEnd) QUICK_ENTRY_POINT_INFO(pJniMethodEntryHook) QUICK_ENTRY_POINT_INFO(pJniDecodeReferenceResult) QUICK_ENTRY_POINT_INFO(pJniLockObject) QUICK_ENTRY_POINT_INFO(pJniUnlockObject) QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline) QUICK_ENTRY_POINT_INFO(pLockObject) 
QUICK_ENTRY_POINT_INFO(pUnlockObject) QUICK_ENTRY_POINT_INFO(pCmpgDouble) QUICK_ENTRY_POINT_INFO(pCmpgFloat) QUICK_ENTRY_POINT_INFO(pCmplDouble) QUICK_ENTRY_POINT_INFO(pCmplFloat) QUICK_ENTRY_POINT_INFO(pCos) QUICK_ENTRY_POINT_INFO(pSin) QUICK_ENTRY_POINT_INFO(pAcos) QUICK_ENTRY_POINT_INFO(pAsin) QUICK_ENTRY_POINT_INFO(pAtan) QUICK_ENTRY_POINT_INFO(pAtan2) QUICK_ENTRY_POINT_INFO(pCbrt) QUICK_ENTRY_POINT_INFO(pCosh) QUICK_ENTRY_POINT_INFO(pExp) QUICK_ENTRY_POINT_INFO(pExpm1) QUICK_ENTRY_POINT_INFO(pHypot) QUICK_ENTRY_POINT_INFO(pLog) QUICK_ENTRY_POINT_INFO(pLog10) QUICK_ENTRY_POINT_INFO(pNextAfter) QUICK_ENTRY_POINT_INFO(pSinh) QUICK_ENTRY_POINT_INFO(pTan) QUICK_ENTRY_POINT_INFO(pTanh) QUICK_ENTRY_POINT_INFO(pFmod) QUICK_ENTRY_POINT_INFO(pL2d) QUICK_ENTRY_POINT_INFO(pFmodf) QUICK_ENTRY_POINT_INFO(pL2f) QUICK_ENTRY_POINT_INFO(pD2iz) QUICK_ENTRY_POINT_INFO(pF2iz) QUICK_ENTRY_POINT_INFO(pIdivmod) QUICK_ENTRY_POINT_INFO(pD2l) QUICK_ENTRY_POINT_INFO(pF2l) QUICK_ENTRY_POINT_INFO(pLdiv) QUICK_ENTRY_POINT_INFO(pLmod) QUICK_ENTRY_POINT_INFO(pLmul) QUICK_ENTRY_POINT_INFO(pShlLong) QUICK_ENTRY_POINT_INFO(pShrLong) QUICK_ENTRY_POINT_INFO(pUshrLong) QUICK_ENTRY_POINT_INFO(pIndexOf) QUICK_ENTRY_POINT_INFO(pStringCompareTo) QUICK_ENTRY_POINT_INFO(pMemcpy) QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline) QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline) QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge) QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck) QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck) QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck) QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck) QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck) QUICK_ENTRY_POINT_INFO(pInvokePolymorphic) QUICK_ENTRY_POINT_INFO(pTestSuspend) QUICK_ENTRY_POINT_INFO(pDeliverException) QUICK_ENTRY_POINT_INFO(pThrowArrayBounds) QUICK_ENTRY_POINT_INFO(pThrowDivZero) QUICK_ENTRY_POINT_INFO(pThrowNullPointer) QUICK_ENTRY_POINT_INFO(pThrowStackOverflow) QUICK_ENTRY_POINT_INFO(pDeoptimize) QUICK_ENTRY_POINT_INFO(pA64Load) QUICK_ENTRY_POINT_INFO(pA64Store) QUICK_ENTRY_POINT_INFO(pNewEmptyString) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BB) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset) QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset) QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C) QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII) QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC) QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints) QUICK_ENTRY_POINT_INFO(pNewStringFromString) QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer) QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder) QUICK_ENTRY_POINT_INFO(pNewStringFromUtf16Bytes_BII) QUICK_ENTRY_POINT_INFO(pJniReadBarrier) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10) 
QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28) QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29) QUICK_ENTRY_POINT_INFO(pReadBarrierSlow) QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow) #undef QUICK_ENTRY_POINT_INFO os << offset; } void Thread::QuickDeliverException(bool skip_method_exit_callbacks) { // Get exception from thread. ObjPtr exception = GetException(); CHECK(exception != nullptr); if (exception == GetDeoptimizationException()) { // This wasn't a real exception, so just clear it here. If there was an actual exception it // will be recorded in the DeoptimizationContext and it will be restored later. ClearException(); artDeoptimize(this, skip_method_exit_callbacks); UNREACHABLE(); } ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr()); // This is a real exception: let the instrumentation know about it. Exception throw listener // could set a breakpoint or install listeners that might require a deoptimization. Hence the // deoptimization check needs to happen after calling the listener. instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); if (instrumentation->HasExceptionThrownListeners() && IsExceptionThrownByCurrentMethod(exception)) { // Instrumentation may cause GC so keep the exception object safe. StackHandleScope<1> hs(this); HandleWrapperObjPtr h_exception(hs.NewHandleWrapper(&exception)); instrumentation->ExceptionThrownEvent(this, exception); } // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something? // Note: we do this *after* reporting the exception to instrumentation in case it now requires // deoptimization. It may happen if a debugger is attached and requests new events (single-step, // breakpoint, ...) when the exception is reported. // Frame pop can be requested on a method unwind callback which requires a deopt. We could // potentially check after each unwind callback to see if a frame pop was requested and deopt if // needed. Since this is a debug only feature and this path is only taken when an exception is // thrown, it is not performance critical and we keep it simple by just deopting if method exit // listeners are installed and frame pop feature is supported. bool needs_deopt = instrumentation->HasMethodExitListeners() && Runtime::Current()->AreNonStandardExitsEnabled(); if (Dbg::IsForcedInterpreterNeededForException(this) || IsForceInterpreter() || needs_deopt) { NthCallerVisitor visitor(this, 0, false); visitor.WalkStack(); if (visitor.GetCurrentQuickFrame() != nullptr) { if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), visitor.caller_pc)) { // method_type shouldn't matter due to exception handling. 
const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault; // Save the exception into the deoptimization context so it can be restored // before entering the interpreter. PushDeoptimizationContext( JValue(), /* is_reference= */ false, exception, /* from_code= */ false, method_type); artDeoptimize(this, skip_method_exit_callbacks); UNREACHABLE(); } else { LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " << visitor.caller->PrettyMethod(); } } else { // This is either top of call stack, or shadow frame. DCHECK(visitor.caller == nullptr || visitor.IsShadowFrame()); } } // Don't leave exception visible while we try to find the handler, which may cause class // resolution. ClearException(); QuickExceptionHandler exception_handler(this, false); exception_handler.FindCatch(exception, skip_method_exit_callbacks); if (exception_handler.GetClearException()) { // Exception was cleared as part of delivery. DCHECK(!IsExceptionPending()); } else { // Exception was put back with a throw location. DCHECK(IsExceptionPending()); // Check the to-space invariant on the re-installed exception (if applicable). ReadBarrier::MaybeAssertToSpaceInvariant(GetException()); } exception_handler.DoLongJump(); } Context* Thread::GetLongJumpContext() { Context* result = tlsPtr_.long_jump_context; if (result == nullptr) { result = Context::Create(); } else { tlsPtr_.long_jump_context = nullptr; // Avoid context being shared. result->Reset(); } return result; } ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out, bool check_suspended, bool abort_on_error) const { // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is // so we don't abort in a special situation (thinlocked monitor) when dumping the Java // stack. ArtMethod* method = nullptr; uint32_t dex_pc = dex::kDexNoIndex; StackVisitor::WalkStack( [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod* m = visitor->GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. return true; } method = m; dex_pc = visitor->GetDexPc(abort_on_error); return false; }, const_cast(this), /* context= */ nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames, check_suspended); if (dex_pc_out != nullptr) { *dex_pc_out = dex_pc; } return method; } bool Thread::HoldsLock(ObjPtr object) const { return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId(); } extern std::vector*> GetProxyReferenceArguments(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_); // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor). template class ReferenceMapVisitor : public StackVisitor { public: ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) // We are visiting the references in compiled frames, so we do not need // to know the inlined frames. 
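      // When kPrecise is true, quick-frame roots are additionally mapped back to their dex
      // registers via the dex register map (see VisitQuickFramePrecise below); otherwise
      // they are reported with JavaFrameRootInfo::kImpreciseVreg.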
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames), visitor_(visitor), visit_declaring_class_(!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {} bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) { if (false) { LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod()) << StringPrintf("@ PC:%04x", GetDexPc()); } ShadowFrame* shadow_frame = GetCurrentShadowFrame(); if (shadow_frame != nullptr) { VisitShadowFrame(shadow_frame); } else if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) { VisitNterpFrame(); } else { VisitQuickFrame(); } return true; } void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod* m = shadow_frame->GetMethod(); VisitDeclaringClass(m); DCHECK(m != nullptr); size_t num_regs = shadow_frame->NumberOfVRegs(); // handle scope for JNI or References for interpreter. for (size_t reg = 0; reg < num_regs; ++reg) { mirror::Object* ref = shadow_frame->GetVRegReference(reg); if (ref != nullptr) { mirror::Object* new_ref = ref; visitor_(&new_ref, reg, this); if (new_ref != ref) { shadow_frame->SetVRegReference(reg, new_ref); } } } // Mark lock count map required for structured locking checks. shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this); } private: // Visiting the declaring class is necessary so that we don't unload the class of a method that // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since // the threads do not all hold the heap bitmap lock for parallel GC. void VisitDeclaringClass(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS { if (!visit_declaring_class_) { return; } ObjPtr klass = method->GetDeclaringClassUnchecked(); // klass can be null for runtime methods. if (klass != nullptr) { if (kVerifyImageObjectsMarked) { gc::Heap* const heap = Runtime::Current()->GetHeap(); gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass, /*fail_ok=*/true); if (space != nullptr && space->IsImageSpace()) { bool failed = false; if (!space->GetLiveBitmap()->Test(klass.Ptr())) { failed = true; LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space; } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) { failed = true; LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space; } if (failed) { GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT)); LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method << " klass@" << klass.Ptr(); // Pretty info last in case it crashes. 
LOG(FATAL) << "Method " << method->PrettyMethod() << " klass " << klass->PrettyClass(); } } } mirror::Object* new_ref = klass.Ptr(); visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kMethodDeclaringClass, this); if (new_ref != klass) { method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); } } } void VisitNterpFrame() REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); StackReference* vreg_ref_base = reinterpret_cast*>(NterpGetReferenceArray(cur_quick_frame)); StackReference* vreg_int_base = reinterpret_cast*>(NterpGetRegistersArray(cur_quick_frame)); CodeItemDataAccessor accessor((*cur_quick_frame)->DexInstructionData()); const uint16_t num_regs = accessor.RegistersSize(); // An nterp frame has two arrays: a dex register array and a reference array // that shadows the dex register array but only containing references // (non-reference dex registers have nulls). See nterp_helpers.cc. for (size_t reg = 0; reg < num_regs; ++reg) { StackReference* ref_addr = vreg_ref_base + reg; mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { mirror::Object* new_ref = ref; visitor_(&new_ref, reg, this); if (new_ref != ref) { ref_addr->Assign(new_ref); StackReference* int_addr = vreg_int_base + reg; int_addr->Assign(new_ref); } } } } template ALWAYS_INLINE inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); DCHECK(cur_quick_frame != nullptr); ArtMethod* m = *cur_quick_frame; VisitDeclaringClass(m); if (m->IsNative()) { // TODO: Spill the `this` reference in the AOT-compiled String.charAt() // slow-path for throwing SIOOBE, so that we can remove this carve-out. if (UNLIKELY(m->IsIntrinsic()) && m->GetIntrinsic() == enum_cast(Intrinsics::kStringCharAt)) { // The String.charAt() method is AOT-compiled with an intrinsic implementation // instead of a JNI stub. It has a slow path that constructs a runtime frame // for throwing SIOOBE and in that path we do not get the `this` pointer // spilled on the stack, so there is nothing to visit. We can distinguish // this from the GenericJni path by checking that the PC is in the boot image // (PC shall be known thanks to the runtime frame for throwing SIOOBE). // Note that JIT does not emit that intrinic implementation. const void* pc = reinterpret_cast(GetCurrentQuickFramePc()); if (pc != nullptr && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) { return; } } // Native methods spill their arguments to the reserved vregs in the caller's frame // and use pointers to these stack references as jobject, jclass, jarray, etc. // Note: We can come here for a @CriticalNative method when it needs to resolve the // target native function but there would be no references to visit below. 
const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes(); const size_t method_pointer_size = static_cast(kRuntimePointerSize); uint32_t* current_vreg = reinterpret_cast( reinterpret_cast(cur_quick_frame) + frame_size + method_pointer_size); auto visit = [&]() REQUIRES_SHARED(Locks::mutator_lock_) { auto* ref_addr = reinterpret_cast*>(current_vreg); mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { mirror::Object* new_ref = ref; visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kNativeReferenceArgument, this); if (ref != new_ref) { ref_addr->Assign(new_ref); } } }; const char* shorty = m->GetShorty(); if (!m->IsStatic()) { visit(); current_vreg += 1u; } for (shorty += 1u; *shorty != 0; ++shorty) { switch (*shorty) { case 'D': case 'J': current_vreg += 2u; break; case 'L': visit(); FALLTHROUGH_INTENDED; default: current_vreg += 1u; break; } } } else if (!m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) { // Process register map (which native, runtime and proxy methods don't have) const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); DCHECK(method_header->IsOptimized()); StackReference* vreg_base = reinterpret_cast*>(cur_quick_frame); uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); CodeInfo code_info = kPrecise ? CodeInfo(method_header) // We will need dex register maps. : CodeInfo::DecodeGcMasksOnly(method_header); StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset); DCHECK(map.IsValid()); T vreg_info(m, code_info, map, visitor_); // Visit stack entries that hold pointers. BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map); for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) { if (stack_mask.LoadBit(i)) { StackReference* ref_addr = vreg_base + i; mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { mirror::Object* new_ref = ref; vreg_info.VisitStack(&new_ref, i, this); if (ref != new_ref) { ref_addr->Assign(new_ref); } } } } // Visit callee-save registers that hold pointers. uint32_t register_mask = code_info.GetRegisterMaskOf(map); for (uint32_t i = 0; i < BitSizeOf(); ++i) { if (register_mask & (1 << i)) { mirror::Object** ref_addr = reinterpret_cast(GetGPRAddress(i)); if (kIsDebugBuild && ref_addr == nullptr) { std::string thread_name; GetThread()->GetThreadName(thread_name); LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name; DescribeStack(GetThread()); LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) " << "set in register_mask=" << register_mask << " at " << DescribeLocation(); } if (*ref_addr != nullptr) { vreg_info.VisitRegister(ref_addr, i, this); } } } } else if (!m->IsRuntimeMethod() && m->IsProxyMethod()) { // If this is a proxy method, visit its reference arguments. 
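      // Only the generated proxy stubs reach this path; proxy constructors were already
      // handled by the register-map branch above. GetProxyReferenceArguments() returns the
      // stack slots holding the reference arguments so a moving GC can update them in place.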
DCHECK(!m->IsStatic()); DCHECK(!m->IsNative()); std::vector*> ref_addrs = GetProxyReferenceArguments(cur_quick_frame); for (StackReference* ref_addr : ref_addrs) { mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { mirror::Object* new_ref = ref; visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kProxyReferenceArgument, this); if (ref != new_ref) { ref_addr->Assign(new_ref); } } } } } void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) { if (kPrecise) { VisitQuickFramePrecise(); } else { VisitQuickFrameNonPrecise(); } } void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) { struct UndefinedVRegInfo { UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED, const CodeInfo& code_info ATTRIBUTE_UNUSED, const StackMap& map ATTRIBUTE_UNUSED, RootVisitor& _visitor) : visitor(_visitor) { } ALWAYS_INLINE void VisitStack(mirror::Object** ref, size_t stack_index ATTRIBUTE_UNUSED, const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) { visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor); } ALWAYS_INLINE void VisitRegister(mirror::Object** ref, size_t register_index ATTRIBUTE_UNUSED, const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) { visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor); } RootVisitor& visitor; }; VisitQuickFrameWithVregCallback(); } void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) { struct StackMapVRegInfo { StackMapVRegInfo(ArtMethod* method, const CodeInfo& _code_info, const StackMap& map, RootVisitor& _visitor) : number_of_dex_registers(method->DexInstructionData().RegistersSize()), code_info(_code_info), dex_register_map(code_info.GetDexRegisterMapOf(map)), visitor(_visitor) { DCHECK_EQ(dex_register_map.size(), number_of_dex_registers); } // TODO: If necessary, we should consider caching a reverse map instead of the linear // lookups for each location. void FindWithType(const size_t index, const DexRegisterLocation::Kind kind, mirror::Object** ref, const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) { bool found = false; for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) { DexRegisterLocation location = dex_register_map[dex_reg]; if (location.GetKind() == kind && static_cast(location.GetValue()) == index) { visitor(ref, dex_reg, stack_visitor); found = true; } } if (!found) { // If nothing found, report with unknown. visitor(ref, JavaFrameRootInfo::kUnknownVreg, stack_visitor); } } void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) { const size_t stack_offset = stack_index * kFrameSlotSize; FindWithType(stack_offset, DexRegisterLocation::Kind::kInStack, ref, stack_visitor); } void VisitRegister(mirror::Object** ref, size_t register_index, const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) { FindWithType(register_index, DexRegisterLocation::Kind::kInRegister, ref, stack_visitor); } size_t number_of_dex_registers; const CodeInfo& code_info; DexRegisterMap dex_register_map; RootVisitor& visitor; }; VisitQuickFrameWithVregCallback(); } // Visitor for when we visit a root. 
RootVisitor& visitor_; bool visit_declaring_class_; }; class RootCallbackVisitor { public: RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {} void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const REQUIRES_SHARED(Locks::mutator_lock_) { visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg)); } private: RootVisitor* const visitor_; const uint32_t tid_; }; void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) { for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope(); brhs != nullptr; brhs = brhs->GetLink()) { brhs->VisitTargets(visitor); } } // FIXME: clang-r433403 reports the below function exceeds frame size limit. // http://b/197647048 #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wframe-larger-than=" template void Thread::VisitRoots(RootVisitor* visitor) { const uint32_t thread_id = GetThreadId(); visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id)); if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) { visitor->VisitRoot(reinterpret_cast(&tlsPtr_.exception), RootInfo(kRootNativeStack, thread_id)); } if (tlsPtr_.async_exception != nullptr) { visitor->VisitRoot(reinterpret_cast(&tlsPtr_.async_exception), RootInfo(kRootNativeStack, thread_id)); } visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id)); tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id)); tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id)); HandleScopeVisitRoots(visitor, thread_id); // Visit roots for deoptimization. if (tlsPtr_.stacked_shadow_frame_record != nullptr) { RootCallbackVisitor visitor_to_callback(visitor, thread_id); ReferenceMapVisitor mapper(this, nullptr, visitor_to_callback); for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record; record != nullptr; record = record->GetLink()) { for (ShadowFrame* shadow_frame = record->GetShadowFrame(); shadow_frame != nullptr; shadow_frame = shadow_frame->GetLink()) { mapper.VisitShadowFrame(shadow_frame); } } } for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack; record != nullptr; record = record->GetLink()) { if (record->IsReference()) { visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(), RootInfo(kRootThreadObject, thread_id)); } visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(), RootInfo(kRootThreadObject, thread_id)); } if (tlsPtr_.frame_id_to_shadow_frame != nullptr) { RootCallbackVisitor visitor_to_callback(visitor, thread_id); ReferenceMapVisitor mapper(this, nullptr, visitor_to_callback); for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame; record != nullptr; record = record->GetNext()) { mapper.VisitShadowFrame(record->GetShadowFrame()); } } for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) { verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id)); } // Visit roots on this thread's stack RuntimeContextType context; RootCallbackVisitor visitor_to_callback(visitor, thread_id); ReferenceMapVisitor mapper(this, &context, visitor_to_callback); mapper.template WalkStack(false); } #pragma GCC diagnostic pop static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, size_t* value) REQUIRES_SHARED(Locks::mutator_lock_) { if (inst == nullptr) { return; } using Opcode = Instruction::Code; Opcode opcode = 
inst->Opcode(); switch (opcode) { case Opcode::NEW_INSTANCE: case Opcode::CHECK_CAST: case Opcode::INSTANCE_OF: case Opcode::NEW_ARRAY: case Opcode::CONST_CLASS: { mirror::Class* klass = reinterpret_cast(*value); if (klass == nullptr || klass == Runtime::GetWeakClassSentinel()) { return; } mirror::Class* new_klass = down_cast(visitor->IsMarked(klass)); if (new_klass == nullptr) { *value = reinterpret_cast(Runtime::GetWeakClassSentinel()); } else if (new_klass != klass) { *value = reinterpret_cast(new_klass); } return; } case Opcode::CONST_STRING: case Opcode::CONST_STRING_JUMBO: { mirror::Object* object = reinterpret_cast(*value); if (object == nullptr) { return; } mirror::Object* new_object = visitor->IsMarked(object); // We know the string is marked because it's a strongly-interned string that // is always alive (see b/117621117 for trying to make those strings weak). if (kIsDebugBuild && new_object == nullptr) { // (b/275005060) Currently the problem is reported only on CC GC. // Therefore we log it with more information. But since the failure rate // is quite high, sampling it. if (gUseReadBarrier) { Runtime* runtime = Runtime::Current(); gc::collector::ConcurrentCopying* cc = runtime->GetHeap()->ConcurrentCopyingCollector(); CHECK_NE(cc, nullptr); LOG(FATAL) << cc->DumpReferenceInfo(object, "string") << " string interned: " << std::boolalpha << runtime->GetInternTable()->LookupStrong(Thread::Current(), down_cast(object)) << std::noboolalpha; } else { // Other GCs LOG(FATAL) << __FUNCTION__ << ": IsMarked returned null for a strongly interned string: " << object; } } else if (new_object != object) { *value = reinterpret_cast(new_object); } return; } default: // The following opcode ranges store non-reference values. if ((Opcode::IGET <= opcode && opcode <= Opcode::SPUT_SHORT) || (Opcode::INVOKE_VIRTUAL <= opcode && opcode <= Opcode::INVOKE_INTERFACE_RANGE)) { return; // Nothing to do for the GC. } // New opcode is using the cache. We need to explicitly handle it in this method. DCHECK(false) << "Unhandled opcode " << inst->Opcode(); } } void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) { for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) { SweepCacheEntry(visitor, reinterpret_cast(entry.first), &entry.second); } } // FIXME: clang-r433403 reports the below function exceeds frame size limit. // http://b/197647048 #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wframe-larger-than=" void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) { if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) { VisitRoots(visitor); } else { VisitRoots(visitor); } } #pragma GCC diagnostic pop class VerifyRootVisitor : public SingleRootVisitor { public: void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) { VerifyObject(root); } }; void Thread::VerifyStackImpl() { if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) { VerifyRootVisitor visitor; std::unique_ptr context(Context::Create()); RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId()); ReferenceMapVisitor mapper(this, context.get(), visitor_to_callback); mapper.WalkStack(); } } // Set the stack end to that to be used during a stack overflow void Thread::SetStackEndForStackOverflow() { // During stack overflow we allow use of the full stack. if (tlsPtr_.stack_end == tlsPtr_.stack_begin) { // However, we seem to have already extended to use the full stack. 
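    // We already extended the usable stack to its very beginning for an earlier overflow,
    // so there is no reserved headroom left to handle this one; dump what we can and abort.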
LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently " << GetStackOverflowReservedBytes(kRuntimeISA) << ")?"; DumpStack(LOG_STREAM(ERROR)); LOG(FATAL) << "Recursive stack overflow."; } tlsPtr_.stack_end = tlsPtr_.stack_begin; // Remove the stack overflow protection if is it set up. bool implicit_stack_check = Runtime::Current()->GetImplicitStackOverflowChecks(); if (implicit_stack_check) { if (!UnprotectStack()) { LOG(ERROR) << "Unable to remove stack protection for stack overflow"; } } } void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) { DCHECK_LE(start, end); DCHECK_LE(end, limit); tlsPtr_.thread_local_start = start; tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start; tlsPtr_.thread_local_end = end; tlsPtr_.thread_local_limit = limit; tlsPtr_.thread_local_objects = 0; } void Thread::ResetTlab() { gc::Heap* const heap = Runtime::Current()->GetHeap(); if (heap->GetHeapSampler().IsEnabled()) { // Note: We always ResetTlab before SetTlab, therefore we can do the sample // offset adjustment here. heap->AdjustSampleOffset(GetTlabPosOffset()); VLOG(heap) << "JHP: ResetTlab, Tid: " << GetTid() << " adjustment = " << (tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start); } SetTlab(nullptr, nullptr, nullptr); } bool Thread::HasTlab() const { const bool has_tlab = tlsPtr_.thread_local_pos != nullptr; if (has_tlab) { DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr); } else { DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr); } return has_tlab; } void Thread::AdjustTlab(size_t slide_bytes) { if (HasTlab()) { tlsPtr_.thread_local_start -= slide_bytes; tlsPtr_.thread_local_pos -= slide_bytes; tlsPtr_.thread_local_end -= slide_bytes; tlsPtr_.thread_local_limit -= slide_bytes; } } std::ostream& operator<<(std::ostream& os, const Thread& thread) { thread.ShortDump(os); return os; } bool Thread::ProtectStack(bool fatal_on_error) { void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; VLOG(threads) << "Protecting stack at " << pregion; if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) { if (fatal_on_error) { // b/249586057, LOG(FATAL) times out LOG(ERROR) << "Unable to create protected region in stack for implicit overflow check. " "Reason: " << strerror(errno) << " size: " << kStackOverflowProtectedSize; exit(1); } return false; } return true; } bool Thread::UnprotectStack() { void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; VLOG(threads) << "Unprotecting stack at " << pregion; return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0; } void Thread::PushVerifier(verifier::MethodVerifier* verifier) { verifier->link_ = tlsPtr_.method_verifier; tlsPtr_.method_verifier = verifier; } void Thread::PopVerifier(verifier::MethodVerifier* verifier) { CHECK_EQ(tlsPtr_.method_verifier, verifier); tlsPtr_.method_verifier = verifier->link_; } size_t Thread::NumberOfHeldMutexes() const { size_t count = 0; for (BaseMutex* mu : tlsPtr_.held_mutexes) { count += mu != nullptr ? 
1 : 0; } return count; } void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); ClearException(); ObjPtr pending_exception; bool from_code = false; DeoptimizationMethodType method_type; PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type); SetTopOfStack(nullptr); // Restore the exception that was pending before deoptimization then interpret the // deoptimized frames. if (pending_exception != nullptr) { SetException(pending_exception); } ShadowFrame* shadow_frame = MaybePopDeoptimizedStackedShadowFrame(); // We may not have a shadow frame if we deoptimized at the return of the // quick_to_interpreter_bridge which got directly called by art_quick_invoke_stub. if (shadow_frame != nullptr) { SetTopOfShadowStack(shadow_frame); interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, result, from_code, method_type); } } void Thread::SetAsyncException(ObjPtr new_exception) { CHECK(new_exception != nullptr); Runtime::Current()->SetAsyncExceptionsThrown(); if (kIsDebugBuild) { // Make sure we are in a checkpoint. MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); CHECK(this == Thread::Current() || GetSuspendCount() >= 1) << "It doesn't look like this was called in a checkpoint! this: " << this << " count: " << GetSuspendCount(); } tlsPtr_.async_exception = new_exception.Ptr(); } bool Thread::ObserveAsyncException() { DCHECK(this == Thread::Current()); if (tlsPtr_.async_exception != nullptr) { if (tlsPtr_.exception != nullptr) { LOG(WARNING) << "Overwriting pending exception with async exception. Pending exception is: " << tlsPtr_.exception->Dump(); LOG(WARNING) << "Async exception is " << tlsPtr_.async_exception->Dump(); } tlsPtr_.exception = tlsPtr_.async_exception; tlsPtr_.async_exception = nullptr; return true; } else { return IsExceptionPending(); } } void Thread::SetException(ObjPtr new_exception) { CHECK(new_exception != nullptr); // TODO: DCHECK(!IsExceptionPending()); tlsPtr_.exception = new_exception.Ptr(); } bool Thread::IsAotCompiler() { return Runtime::Current()->IsAotCompiler(); } mirror::Object* Thread::GetPeerFromOtherThread() const { DCHECK(tlsPtr_.jpeer == nullptr); mirror::Object* peer = tlsPtr_.opeer; if (gUseReadBarrier && Current()->GetIsGcMarking()) { // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack // may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly // mark/forward it here. peer = art::ReadBarrier::Mark(peer); } return peer; } void Thread::SetReadBarrierEntrypoints() { // Make sure entrypoints aren't null. UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true); } void Thread::ClearAllInterpreterCaches() { static struct ClearInterpreterCacheClosure : Closure { void Run(Thread* thread) override { thread->GetInterpreterCache()->Clear(thread); } } closure; Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); } void Thread::ReleaseLongJumpContextInternal() { // Each QuickExceptionHandler gets a long jump context and uses // it for doing the long jump, after finding catch blocks/doing deoptimization. // Both finding catch blocks and deoptimization can trigger another // exception such as a result of class loading. So there can be nested // cases of exception handling and multiple contexts being used. 
  // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
  // for reuse so there is no need to always allocate a new one each time when
  // getting a context. Since we only keep one context for reuse, delete the
  // existing one since the passed in context is yet to be used for longjump.
  delete tlsPtr_.long_jump_context;
}

void Thread::SetNativePriority(int new_priority) {
  palette_status_t status = PaletteSchedSetPriority(GetTid(), new_priority);
  CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
}

int Thread::GetNativePriority() const {
  int priority = 0;
  palette_status_t status = PaletteSchedGetPriority(GetTid(), &priority);
  CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
  return priority;
}

bool Thread::IsSystemDaemon() const {
  if (GetPeer() == nullptr) {
    return false;
  }
  return WellKnownClasses::java_lang_Thread_systemDaemon->GetBoolean(GetPeer());
}

std::string Thread::StateAndFlagsAsHexString() const {
  std::stringstream result_stream;
  result_stream << std::hex << GetStateAndFlags(std::memory_order_relaxed).GetValue();
  return result_stream.str();
}

ScopedExceptionStorage::ScopedExceptionStorage(art::Thread* self)
    : self_(self), hs_(self_), excp_(hs_.NewHandle(self_->GetException())) {
  self_->ClearException();
}

void ScopedExceptionStorage::SuppressOldException(const char* message) {
  CHECK(self_->IsExceptionPending()) << *self_;
  ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
  excp_.Assign(self_->GetException());
  if (old_suppressed != nullptr) {
    LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
  }
  self_->ClearException();
}

ScopedExceptionStorage::~ScopedExceptionStorage() {
  CHECK(!self_->IsExceptionPending()) << *self_;
  if (!excp_.IsNull()) {
    self_->SetException(excp_.Get());
  }
}

}  // namespace art

#pragma clang diagnostic pop  // -Wconversion