/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MANAGED_THREAD_H
#define PANDA_RUNTIME_MANAGED_THREAD_H

#include "thread.h"

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_MANAGED_CODE() ASSERT(::panda::ManagedThread::GetCurrent()->IsManagedCode())
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_NATIVE_CODE() ASSERT(::panda::ManagedThread::GetCurrent()->IsInNativeCode())
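// Illustrative use of the assertion macros above (a minimal sketch; DoRuntimeWork is a
// hypothetical function, not part of this header): entrypoints that must only be reached
// from managed code can check that invariant in debug builds.
//
//     void DoRuntimeWork()
//     {
//         ASSERT_MANAGED_CODE();  // fails in debug builds if the thread is in native code
//         // ... work that may access managed objects ...
//     }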

namespace panda {
/**
 * \brief Class represents a managed thread
 *
 * When the thread is created, it registers itself in the runtime, so
 * the runtime knows about all managed threads at any given time.
 *
 * This class should be used to store thread-specific information that
 * is necessary to execute managed code:
 * - Frame
 * - Exception
 * - Interpreter cache
 * - etc.
 *
 * Currently the interpreter uses it to store the current frame only.
 */
class ManagedThread : public Thread {
public:
    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };

    using native_handle_type = os::thread::native_handle_type;
    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;
    static constexpr size_t STACK_MAX_SIZE_OVERFLOW_CHECK = 256_MB;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 8_KB;
#endif
    static constexpr size_t STACK_OVERFLOW_PROTECTED_SIZE = 4_KB;

    void SetLanguageContext([[maybe_unused]] const LanguageContext &ctx)
    {
        // Deprecated method, don't use it. Only for compatibility with ets_runtime.
    }

    void SetCurrentFrame(Frame *f)
    {
        frame_ = f;
    }

    tooling::PtThreadInfo *GetPtThreadInfo() const
    {
        return pt_thread_info_.get();
    }

    Frame *GetCurrentFrame() const
    {
        return frame_;
    }

    void *GetFrame() const
    {
        void *fp = GetCurrentFrame();
        if (IsCurrentFrameCompiled()) {
            return (StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp))
                       ? (StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(fp))
                       : fp;
        }
        return fp;
    }

    bool IsCurrentFrameCompiled() const
    {
        return is_compiled_frame_;
    }

    void SetCurrentFrameIsCompiled(bool value)
    {
        is_compiled_frame_ = value;
    }

    void SetException(ObjectHeader *exception)
    {
        exception_ = exception;
    }

    ObjectHeader *GetException() const
    {
        return exception_;
    }

    bool HasPendingException() const
    {
        return exception_ != nullptr;
    }

    void ClearException()
    {
        exception_ = nullptr;
    }
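    // A minimal usage sketch for the pending-exception API above (assumes the caller runs
    // on a registered managed thread; error handling is elided):
    //
    //     ManagedThread *thread = ManagedThread::GetCurrent();
    //     if (thread->HasPendingException()) {
    //         ObjectHeader *exc = thread->GetException();
    //         // ... report or rethrow `exc` ...
    //         thread->ClearException();
    //     }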
    static bool ThreadIsManagedThread(const Thread *thread)
    {
        ASSERT(thread != nullptr);
        Thread::ThreadType thread_type = thread->GetThreadType();
        return thread_type == Thread::ThreadType::THREAD_TYPE_MANAGED ||
               thread_type == Thread::ThreadType::THREAD_TYPE_MT_MANAGED ||
               thread_type == Thread::ThreadType::THREAD_TYPE_TASK;
    }

    static ManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsManagedThread(thread));
        return static_cast<ManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
     * It can be used in hotspots to get the best performance.
     * We can only use this method in places where the ManagedThread exists.
     * @return pointer to ManagedThread
     */
    static ManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current ManagedThread.
     * @return pointer to ManagedThread or nullptr (if current thread is not a managed thread)
     */
    static ManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsManagedThread(thread)) {
            return CastFromThread(thread);
        }
        return nullptr;
    }

    static void Initialize();

    static void Shutdown();

    bool IsThreadAlive()
    {
        return GetStatus() != ThreadStatus::FINISHED;
    }
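    // Sketch of choosing between the accessors above: GetCurrent() tolerates non-managed
    // threads, GetCurrentRaw() skips the check and is for hot paths only:
    //
    //     if (ManagedThread *mt = ManagedThread::GetCurrent()) {
    //         // current thread is managed; safe to use `mt`
    //     }
    //     ManagedThread *hot = ManagedThread::GetCurrentRaw();  // caller must guarantee a managed thread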
    void UpdateStatus(enum ThreadStatus status)
    {
        ASSERT(ManagedThread::GetCurrent() == this);

        ThreadStatus old_status = GetStatus();
        if (old_status == ThreadStatus::RUNNING && status != ThreadStatus::RUNNING) {
            TransitionFromRunningToSuspended(status);
        } else if (old_status != ThreadStatus::RUNNING && status == ThreadStatus::RUNNING) {
            // NB! This thread is treated as suspended, so when we transition from the suspended state to
            // running we need to check the suspension flag and counter, so a SafepointPoll has to be done
            // before acquiring mutator_lock.
            // StoreStatus acquires the lock here
            StoreStatus<CHECK_SAFEPOINT, READLOCK>(ThreadStatus::RUNNING);
        } else if (old_status == ThreadStatus::NATIVE && status != ThreadStatus::IS_TERMINATED_LOOP &&
                   IsRuntimeTerminated()) {
            // If a daemon thread with NATIVE status was deregistered, it should not access any managed object,
            // i.e. change its status from NATIVE, because such an object may already be deleted by the runtime.
            // In case its status is changed, we must call a Safepoint to terminate this thread.
            // For example, if a daemon thread calls ManagedCodeBegin (which changes status from NATIVE to
            // RUNNING), it may be interrupted by a GC thread, which changes status to IS_SUSPENDED.
            StoreStatus<CHECK_SAFEPOINT>(status);
        } else {
            // NB! Status is not a simple bit; without atomics it can produce a faulty GetStatus.
            StoreStatus(status);
        }
    }

    enum ThreadStatus GetStatus()
    {
        // Atomic with acquire order reason: data race with flags with dependencies on reads after
        // the load which should become visible
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        uint32_t res_int = fts_.as_atomic.load(std::memory_order_acquire);
        return static_cast<enum ThreadStatus>(res_int >> THREAD_STATUS_OFFSET);
    }

    static PandaString ThreadStatusAsString(enum ThreadStatus status);
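    // A sketch of a typical status transition around a blocking call, performed by the
    // current thread itself (illustrative only; the runtime normally wraps this in
    // ManagedCodeBegin/NativeCodeBegin, declared below; CallBlockingOperation is hypothetical):
    //
    //     thread->UpdateStatus(ThreadStatus::NATIVE);   // leave RUNNING, drop the mutator lock
    //     CallBlockingOperation();                      // GC may proceed concurrently
    //     thread->UpdateStatus(ThreadStatus::RUNNING);  // re-acquire the lock, polling safepoints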
    panda::mem::StackFrameAllocator *GetStackFrameAllocator() const
    {
        return stack_frame_allocator_;
    }

    panda::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
    {
        return internal_local_allocator_;
    }

    mem::TLAB *GetTLAB() const
    {
        ASSERT(tlab_ != nullptr);
        return tlab_;
    }

    void UpdateTLAB(mem::TLAB *tlab);

    void ClearTLAB();

    void SetStringClassPtr(void *p)
    {
        string_class_ptr_ = p;
    }

#ifndef NDEBUG
    bool IsRuntimeCallEnabled() const
    {
        return runtime_call_enabled_ != 0;
    }
#endif

    static ManagedThread *Create(
        Runtime *runtime, PandaVM *vm,
        panda::panda_file::SourceLang thread_lang = panda::panda_file::SourceLang::PANDA_ASSEMBLY);
    ~ManagedThread() override;

    explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm,
                           Thread::ThreadType thread_type,
                           panda::panda_file::SourceLang thread_lang = panda::panda_file::SourceLang::PANDA_ASSEMBLY);

    // The following methods are just proxies or caches for the runtime interface

    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
    {
        return pre_barrier_type_;
    }

    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
    {
        return post_barrier_type_;
    }

    // Methods to access thread local storage
    InterpreterCache *GetInterpreterCache()
    {
        return &interpreter_cache_;
    }

    uintptr_t GetNativePc() const
    {
        return native_pc_;
    }

    void SetNativePc(uintptr_t pc)
    {
        native_pc_ = pc;
    }

    // Buffers may be destroyed during Detach(), so they should be initialized once more
    void InitBuffers();

    PandaVector<ObjectHeader *> *GetPreBuff() const
    {
        return pre_buff_;
    }

    PandaVector<ObjectHeader *> *MovePreBuff()
    {
        auto res = pre_buff_;
        pre_buff_ = nullptr;
        return res;
    }

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *GetG1PostBarrierBuffer()
    {
        return g1_post_barrier_ring_buffer_;
    }

    void ResetG1PostBarrierRingBuffer()
    {
        g1_post_barrier_ring_buffer_ = nullptr;
    }

    panda::panda_file::SourceLang GetThreadLang() const
    {
        return thread_lang_;
    }

    LanguageContext GetLanguageContext();

    inline bool IsSuspended()
    {
        return ReadFlag(SUSPEND_REQUEST);
    }

    inline bool IsRuntimeTerminated()
    {
        return ReadFlag(RUNTIME_TERMINATION_REQUEST);
    }

    inline void SetRuntimeTerminated()
    {
        SetFlag(RUNTIME_TERMINATION_REQUEST);
    }
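    // Sketch of how a daemon thread might consult the termination flag before touching
    // managed state (illustrative; see the NATIVE-status branch of UpdateStatus above for
    // the authoritative handling):
    //
    //     if (thread->IsRuntimeTerminated()) {
    //         // stay in NATIVE status: managed objects may already be freed by the runtime
    //     }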
    static constexpr uint32_t GetFrameKindOffset()
    {
        return MEMBER_OFFSET(ManagedThread, is_compiled_frame_);
    }
    static constexpr uint32_t GetFlagOffset()
    {
        return MEMBER_OFFSET(ManagedThread, fts_);
    }

    static constexpr uint32_t GetEntrypointsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, entrypoints_);
    }
    static constexpr uint32_t GetObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, object_);
    }
    static constexpr uint32_t GetFrameOffset()
    {
        return MEMBER_OFFSET(ManagedThread, frame_);
    }
    static constexpr uint32_t GetExceptionOffset()
    {
        return MEMBER_OFFSET(ManagedThread, exception_);
    }
    static constexpr uint32_t GetNativePcOffset()
    {
        return MEMBER_OFFSET(ManagedThread, native_pc_);
    }
    static constexpr uint32_t GetTLABOffset()
    {
        return MEMBER_OFFSET(ManagedThread, tlab_);
    }
    static constexpr uint32_t GetTlsCardTableAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, card_table_addr_);
    }
    static constexpr uint32_t GetTlsCardTableMinAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, card_table_min_addr_);
    }
    static constexpr uint32_t GetTlsConcurrentMarkingAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, concurrent_marking_addr_);
    }
    static constexpr uint32_t GetTlsStringClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, string_class_ptr_);
    }
    static constexpr uint32_t GetPreBuffOffset()
    {
        return MEMBER_OFFSET(ManagedThread, pre_buff_);
    }

    static constexpr uint32_t GetLanguageExtensionsDataOffset()
    {
        return MEMBER_OFFSET(ManagedThread, language_extension_data_);
    }

    static constexpr uint32_t GetRuntimeCallEnabledOffset()
    {
#ifndef NDEBUG
        return MEMBER_OFFSET(ManagedThread, runtime_call_enabled_);
#else
        // it should not be used
        return 0;
#endif
    }

    void *GetLanguageExtensionsData() const
    {
        return language_extension_data_;
    }

    void SetLanguageExtensionsData(void *data)
    {
        language_extension_data_ = data;
    }

    static constexpr uint32_t GetInternalIdOffset()
    {
        return MEMBER_OFFSET(ManagedThread, internal_id_);
    }

    virtual void VisitGCRoots(const ObjectVisitor &cb);

    virtual void UpdateGCRoots();

    void PushLocalObject(ObjectHeader **object_header);

    void PopLocalObject();

    void SetThreadPriority(int32_t prio);

    uint32_t GetThreadPriority();

    inline bool IsGcRequired()
    {
        return ReadFlag(GC_SAFEPOINT_REQUEST);
    }

    // NO_THREAD_SANITIZE for invalid TSAN data race report
    NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return (fts_.as_struct.flags & static_cast<uint16_t>(flag)) != 0;
    }

    NO_THREAD_SANITIZE bool TestAllFlags() const
    {
        return (fts_.as_struct.flags) != NO_FLAGS;  // NOLINT(cppcoreguidelines-pro-type-union-access)
    }

    void SetFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.as_atomic.fetch_or(flag, std::memory_order_seq_cst);
    }

    void ClearFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.as_atomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst);
    }

    // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report
    NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe()
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return fts_.as_int;
    }
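    // Minimal sketch of the flag API above, using the SUSPEND_REQUEST flag that
    // IsSuspended() consults (normally managed via SuspendImpl/ResumeImpl):
    //
    //     thread->SetFlag(SUSPEND_REQUEST);                  // observed at the next safepoint poll
    //     bool pending = thread->ReadFlag(SUSPEND_REQUEST);
    //     thread->ClearFlag(SUSPEND_REQUEST);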
    bool IsManagedCodeAllowed() const
    {
        return is_managed_code_allowed_;
    }

    void SetManagedCodeAllowed(bool allowed)
    {
        is_managed_code_allowed_ = allowed;
    }

    // TaggedType has been specialized for js; other types are an empty implementation
    template <typename T>
    inline HandleScope<T> *PopHandleScope()
    {
        return nullptr;
    }

    // TaggedType has been specialized for js; other types are an empty implementation
    template <typename T>
    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handle_scope)
    {
    }

    // TaggedType has been specialized for js; other types are an empty implementation
    template <typename T>
    inline HandleScope<T> *GetTopScope() const
    {
        return nullptr;
    }

    // TaggedType has been specialized for js; other types are an empty implementation
    template <typename T>
    inline HandleStorage<T> *GetHandleStorage() const
    {
        return nullptr;
    }

    // TaggedType has been specialized for js; other types are an empty implementation
    template <typename T>
    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const
    {
        return nullptr;
    }

    CustomTLSData *GetCustomTLSData(const char *key);
    void SetCustomTLSData(const char *key, CustomTLSData *data);
    bool EraseCustomTLSData(const char *key);

#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
    uint32_t RecordMethodEnter()
    {
        return call_depth_++;
    }

    uint32_t RecordMethodExit()
    {
        return --call_depth_;
    }
#endif

    bool IsAttached()
    {
        // Atomic with relaxed order reason: data race with is_attached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        return is_attached_.load(std::memory_order_relaxed);
    }

    void SetAttached()
    {
        // Atomic with relaxed order reason: data race with is_attached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        is_attached_.store(true, std::memory_order_relaxed);
    }

    void SetDetached()
    {
        // Atomic with relaxed order reason: data race with is_attached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        is_attached_.store(false, std::memory_order_relaxed);
    }

    bool IsVMThread()
    {
        return is_vm_thread_;
    }

    void SetVMThread()
    {
        is_vm_thread_ = true;
    }

    bool IsThrowingOOM()
    {
        return throwing_oom_count_ > 0;
    }

    void SetThrowingOOM(bool is_throwing_oom)
    {
        if (is_throwing_oom) {
            throwing_oom_count_++;
            return;
        }
        ASSERT(throwing_oom_count_ > 0);
        throwing_oom_count_--;
    }

    bool IsUsePreAllocObj()
    {
        return use_prealloc_obj_;
    }

    void SetUsePreAllocObj(bool use_prealloc_obj)
    {
        use_prealloc_obj_ = use_prealloc_obj;
    }

    void PrintSuspensionStackIfNeeded();

    ThreadId GetId() const
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints
        // imposed on other reads or writes
        return id_.load(std::memory_order_relaxed);
    }

    void FreeInternalMemory() override;
    void DestroyInternalResources();

    void InitForStackOverflowCheck(size_t native_stack_reserved_size, size_t native_stack_protected_size);

    void DisableStackOverflowCheck();

    void EnableStackOverflowCheck();

    template <bool check_native_stack = true, bool check_iframe_stack = true>
    ALWAYS_INLINE inline bool StackOverflowCheck();

    static size_t GetStackOverflowCheckOffset()
    {
        return STACK_OVERFLOW_RESERVED_SIZE;
    }

    void *const *GetDebugDispatchTable() const
    {
        return debug_dispatch_table;
    }

    void SetDebugDispatchTable(const void *const *dispatch_table)
    {
        debug_dispatch_table = const_cast<void *const *>(dispatch_table);
    }

    void *const *GetCurrentDispatchTable() const
    {
        return current_dispatch_table;
    }

    void SetCurrentDispatchTable(const void *const *dispatch_table)
    {
        current_dispatch_table = const_cast<void *const *>(dispatch_table);
    }

    void SuspendImpl(bool internal_suspend = false);
    void ResumeImpl(bool internal_resume = false);

    virtual void Suspend()
    {
        SuspendImpl();
    }

    virtual void Resume()
    {
        ResumeImpl();
    }

    /**
     * Transition to suspended and back to runnable, re-acquiring a shared hold on mutator_lock_
     */
    void SuspendCheck();

    bool IsUserSuspended()
    {
        return user_code_suspend_count_ > 0;
    }

    void WaitSuspension()
    {
        constexpr int TIMEOUT = 100;
        auto old_status = GetStatus();
        PrintSuspensionStackIfNeeded();
        UpdateStatus(ThreadStatus::IS_SUSPENDED);
        {
            os::memory::LockHolder lock(suspend_lock_);
            while (suspend_count_ > 0) {
                suspend_var_.TimedWait(&suspend_lock_, TIMEOUT);
                // In case the runtime is being terminated, we should abort suspension and release monitors
                if (UNLIKELY(IsRuntimeTerminated())) {
                    suspend_lock_.Unlock();
                    OnRuntimeTerminated();
                    UNREACHABLE();
                }
            }
            ASSERT(!IsSuspended());
        }
        UpdateStatus(old_status);
    }
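    // Illustrative pairing of the suspension API above, as seen from a controlling thread
    // such as a debugger (sketch only; `target` is another thread's ManagedThread):
    //
    //     target->Suspend();   // raises the suspend request; target parks in WaitSuspension()
    //     // ... inspect the target's stack and state ...
    //     target->Resume();    // drops the request and signals suspend_var_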
    virtual void OnRuntimeTerminated() {}

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        // Do Unlock after StoreStatus, because the thread requesting a suspension should see an updated status
        StoreStatus(status);
        Locks::mutator_lock->Unlock();
    }

    void SafepointPoll();

    /**
     * From NativeCode you can call ManagedCodeBegin.
     * From ManagedCode you can call NativeCodeBegin.
     * Calling the Begin of the kind you are already in is forbidden.
     */
    virtual void NativeCodeBegin();
    virtual void NativeCodeEnd();
    [[nodiscard]] virtual bool IsInNativeCode() const;

    virtual void ManagedCodeBegin();
    virtual void ManagedCodeEnd();
    [[nodiscard]] virtual bool IsManagedCode() const;

    static bool IsManagedScope()
    {
        auto thread = GetCurrent();
        return thread != nullptr && thread->is_managed_scope_;
    }

    [[nodiscard]] bool HasManagedCodeOnStack() const;
    [[nodiscard]] bool HasClearStack() const;

protected:
    void ProtectNativeStack();

    template <bool check_native_stack = true, bool check_iframe_stack = true>
    ALWAYS_INLINE inline bool StackOverflowCheckResult() const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (check_native_stack) {
            if (UNLIKELY(__builtin_frame_address(0) < ToVoidPtr(native_stack_end_))) {
                return false;
            }
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (check_iframe_stack) {
            if (UNLIKELY(GetStackFrameAllocator()->GetAllocatedSize() > iframe_stack_size_)) {
                return false;
            }
        }
        return true;
    }

    static const int WAIT_INTERVAL = 10;

    template <typename T = void>
    T *GetAssociatedObject()
    {
        return reinterpret_cast<T *>(object_);
    }

    template <typename T>
    void SetAssociatedObject(T *object)
    {
        object_ = object;
    }

    virtual void InterruptPostImpl() {}

    void UpdateId(ThreadId id)
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints
        // imposed on other reads or writes
        id_.store(id, std::memory_order_relaxed);
    }

    bool GetOnThreadTerminationCalled() const
    {
        return on_thread_terminated_called;
    }

    void SetOnThreadTerminationCalled()
    {
        on_thread_terminated_called = true;
    }

private:
    enum SafepointFlag : bool { DONT_CHECK_SAFEPOINT = false, CHECK_SAFEPOINT = true };
    enum ReadlockFlag : bool { NO_READLOCK = false, READLOCK = true };

    PandaString LogThreadStack(ThreadState new_state) const;

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    template <SafepointFlag safepoint = DONT_CHECK_SAFEPOINT, ReadlockFlag readlock = NO_READLOCK>
    void StoreStatus(ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        while (true) {
            union FlagsAndThreadStatus old_fts {
            };
            union FlagsAndThreadStatus new_fts {
            };
            old_fts.as_int = ReadFlagsAndThreadStatusUnsafe();  // NOLINT(cppcoreguidelines-pro-type-union-access)

            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (safepoint == CHECK_SAFEPOINT) {  // NOLINT(bugprone-suspicious-semicolon)
                if (old_fts.as_struct.flags != NO_FLAGS) {  // NOLINT(cppcoreguidelines-pro-type-union-access)
                    // someone requires a safepoint
                    SafepointPoll();
                    continue;
                }
            }

            new_fts.as_struct.flags = old_fts.as_struct.flags;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            new_fts.as_struct.status = status;  // NOLINT(cppcoreguidelines-pro-type-union-access)

            // The mutator lock should be acquired before the status change
            // to avoid blocking in the running state
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (readlock == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                Locks::mutator_lock->ReadLock();
            }

            // clang-format conflicts with CodeCheckAgent, so disable it here
            // clang-format off
            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
            if (fts_.as_atomic.compare_exchange_weak(
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int, std::memory_order_release)) {
                // If the CAS succeeded, we set the new status and no request occurred here; safe to proceed.
                break;
            }
            // Release the mutator lock to acquire it on the next loop iteration
            // clang-format on
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (readlock == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                Locks::mutator_lock->Unlock();
            }
        }
    }
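    // Layout assumed by the code above: fts_ packs the 16-bit flags into the low half and
    // the thread status into the high half of a single 32-bit word, so GetStatus() is
    // `fts_ >> THREAD_STATUS_OFFSET`, ReadFlag() masks the low 16 bits, and one CAS in
    // StoreStatus() updates both fields atomically.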
    static constexpr uint32_t THREAD_STATUS_OFFSET = 16;
    static_assert(sizeof(fts_) == sizeof(uint32_t), "Wrong fts_ size");

    // Can cause data races if the child thread's UpdateId is executed concurrently with GetNativeThreadId
    std::atomic<ThreadId> id_;

    static mem::TLAB *zero_tlab;
    PandaVector<ObjectHeader **> local_objects_;

    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
    InterpreterCache interpreter_cache_;

    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> custom_tls_cache_ GUARDED_BY(Locks::custom_tls_lock);

    // Keep these here to speed up the interpreter
    mem::BarrierType pre_barrier_type_ {mem::BarrierType::PRE_WRB_NONE};
    mem::BarrierType post_barrier_type_ {mem::BarrierType::POST_WRB_NONE};
    // Thread local storages to avoid locks in the heap manager
    mem::StackFrameAllocator *stack_frame_allocator_;
    mem::InternalAllocator<>::LocalSmallObjectAllocator *internal_local_allocator_;
    std::atomic_bool is_attached_ {false};  // Can be changed after the thread is registered and can cause a data race
    bool is_vm_thread_ = false;

    bool is_managed_code_allowed_ {true};

    size_t throwing_oom_count_ {0};
    bool use_prealloc_obj_ {false};

    panda::panda_file::SourceLang thread_lang_ = panda::panda_file::SourceLang::PANDA_ASSEMBLY;

    PandaUniquePtr<tooling::PtThreadInfo> pt_thread_info_;

    // for stack overflow check
    // |..... Method 1 ....|
    // |..... Method 2 ....|
    // |..... Method 3 ....|_ _ _ native_stack_top
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|_ _ _ native_stack_end
    // |..... Reserved region ....|
    // |.... Protected region ....|_ _ _ native_stack_begin
    // |...... Guard region ......|
    uintptr_t native_stack_begin_ {0};
    // end of stack for the managed thread; throw an exception if the native stack grows over it
    uintptr_t native_stack_end_ {0};
    // os thread stack size
    size_t native_stack_size_ {0};
    // guard region size of the stack
    size_t native_stack_guard_size_ {0};
    // the reserved region is for exception handling if a stack overflow happens
    size_t native_stack_reserved_size_ {0};
    // the protected region is for compiled code to test load [sp - native_stack_reserved_size_] to trigger a segv
    size_t native_stack_protected_size_ {0};
    // max allowed size for an interpreter frame
    size_t iframe_stack_size_ {std::numeric_limits<size_t>::max()};

    PandaVector<HandleScope<coretypes::TaggedType> *> tagged_handle_scopes_ {};
    HandleStorage<coretypes::TaggedType> *tagged_handle_storage_ {nullptr};
    GlobalHandleStorage<coretypes::TaggedType> *tagged_global_handle_storage_ {nullptr};

    PandaVector<HandleScope<ObjectHeader *> *> object_header_handle_scopes_ {};
    HandleStorage<ObjectHeader *> *object_header_handle_storage_ {nullptr};

    os::memory::ConditionVariable suspend_var_ GUARDED_BY(suspend_lock_);
    os::memory::Mutex suspend_lock_;
    uint32_t suspend_count_ GUARDED_BY(suspend_lock_) = 0;
    std::atomic_uint32_t user_code_suspend_count_ {0};

    PandaStack<ThreadState> thread_frame_states_;

    // Boolean which is safe to access after the runtime is destroyed
    bool is_managed_scope_ {false};

    // TODO(Mordan Vitalii #6852): remove this flag when FreeInternalSpace will not be called after Detach for
    // daemon thread
    bool on_thread_terminated_called {false};

    friend class panda::test::ThreadTest;
    friend class panda::ThreadManager;

    // Used in method events
    uint32_t call_depth_ {0};

    void *const *debug_dispatch_table {nullptr};

    void *const *current_dispatch_table {nullptr};

    NO_COPY_SEMANTIC(ManagedThread);
    NO_MOVE_SEMANTIC(ManagedThread);
};
}  // namespace panda

#endif  // PANDA_RUNTIME_MANAGED_THREAD_H