/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MANAGED_THREAD_H
#define PANDA_RUNTIME_MANAGED_THREAD_H

#include "thread.h"

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_MANAGED_CODE() ASSERT(::ark::ManagedThread::GetCurrent()->IsManagedCode())
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_NATIVE_CODE() ASSERT(::ark::ManagedThread::GetCurrent()->IsInNativeCode())

namespace ark {
class MTThreadManager;
/**
 * @brief Class represents a managed thread
 *
 * When the thread is created it registers itself in the runtime, so
 * the runtime knows about all managed threads at any given time.
 *
 * This class should be used to store thread-specific information that
 * is necessary to execute managed code:
 *  - Frame
 *  - Exception
 *  - Interpreter cache
 *  - etc.
 *
 * Currently it's used by the interpreter to store the current frame only.
 */
class ManagedThread : public Thread {
public:
    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };

    using NativeHandleType = os::thread::NativeHandleType;
    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;
    static constexpr size_t STACK_MAX_SIZE_OVERFLOW_CHECK = 256_MB;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 12_KB;
#endif
    static constexpr size_t STACK_OVERFLOW_PROTECTED_SIZE = 4_KB;

    void SetLanguageContext([[maybe_unused]] const LanguageContext &ctx)
    {
        // Deprecated method, don't use it. Kept only for compatibility with js_runtime.
    }

    void SetCurrentFrame(Frame *f)
    {
        frame_ = f;
    }

    tooling::PtThreadInfo *GetPtThreadInfo() const
    {
        return ptThreadInfo_.get();
    }

    Frame *GetCurrentFrame() const
    {
        return frame_;
    }
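    // Illustrative sketch (not part of this header; the local names are hypothetical): an
    // interpreter-style caller is expected to publish the callee frame via SetCurrentFrame()
    // and restore the previous frame when the callee returns:
    //
    //     ManagedThread *thread = ManagedThread::GetCurrentRaw();
    //     Frame *prevFrame = thread->GetCurrentFrame();
    //     thread->SetCurrentFrame(calleeFrame);  // execute the callee's bytecode here
    //     thread->SetCurrentFrame(prevFrame);    // back to the caller's frame on return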
    void *GetFrame() const
    {
        void *fp = GetCurrentFrame();
        if (IsCurrentFrameCompiled()) {
            return (StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp))
                       ? (StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(fp))
                       : fp;
        }
        return fp;
    }

    bool IsCurrentFrameCompiled() const
    {
        return isCompiledFrame_;
    }

    void SetCurrentFrameIsCompiled(bool value)
    {
        isCompiledFrame_ = value;
    }

    void SetException(ObjectHeader *exception)
    {
        exception_ = exception;
    }

    ObjectHeader *GetException() const
    {
        return exception_;
    }

    bool HasPendingException() const
    {
        return exception_ != nullptr;
    }

    void ClearException()
    {
        exception_ = nullptr;
    }

    size_t GetIFrameStackSize() const
    {
        return iframeStackSize_;
    }

    static bool ThreadIsManagedThread(const Thread *thread)
    {
        ASSERT(thread != nullptr);
        Thread::ThreadType threadType = thread->GetThreadType();
        return threadType == Thread::ThreadType::THREAD_TYPE_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_MT_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_TASK;
    }

    static ManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsManagedThread(thread));
        return static_cast<ManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
     * It can be used in hotspots to get the best performance.
     * We can only use this method in places where the ManagedThread exists.
     * @return pointer to ManagedThread
     */
    static ManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current ManagedThread.
     * @return pointer to ManagedThread or nullptr (if the current thread is not a managed thread)
     */
    PANDA_PUBLIC_API static ManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsManagedThread(thread)) {
            return CastFromThread(thread);
        }
        return nullptr;
    }

    static void Initialize();

    static void Shutdown();

    bool IsThreadAlive()
    {
        return GetStatus() != ThreadStatus::FINISHED;
    }
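    // Illustrative sketch (hypothetical call site): native glue code would typically obtain the
    // thread via the safe getter and inspect the pending exception through the accessors above:
    //
    //     ManagedThread *thread = ManagedThread::GetCurrent();
    //     if (thread != nullptr && thread->HasPendingException()) {
    //         ObjectHeader *exc = thread->GetException();
    //         thread->ClearException();  // take ownership of the pending exception
    //     }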
    void UpdateStatus(enum ThreadStatus status)
    {
        ASSERT(ManagedThread::GetCurrent() == this);

        ThreadStatus oldStatus = GetStatus();
        if (oldStatus == ThreadStatus::RUNNING && status != ThreadStatus::RUNNING) {
            TransitionFromRunningToSuspended(status);
        } else if (oldStatus != ThreadStatus::RUNNING && status == ThreadStatus::RUNNING) {
            // NB! This thread is treated as suspended, so when we transition from the suspended state to
            // running we need to check the suspension flag and counter, so SafepointPoll has to be done before
            // acquiring mutator_lock.
            // StoreStatus acquires the lock here
            StoreStatus<CHECK_SAFEPOINT, READLOCK>(ThreadStatus::RUNNING);
        } else if (oldStatus == ThreadStatus::NATIVE && status != ThreadStatus::IS_TERMINATED_LOOP &&
                   IsRuntimeTerminated()) {
            // If a daemon thread with NATIVE status was deregistered, it should not access any managed object,
            // i.e. change its status from NATIVE, because such an object may already be deleted by the runtime.
            // In case its status is changed, we must call a Safepoint to terminate this thread.
            // For example, if a daemon thread calls ManagedCodeBegin (which changes status from NATIVE to
            // RUNNING), it may be interrupted by a GC thread, which changes status to IS_SUSPENDED.
            StoreStatus<CHECK_SAFEPOINT>(status);
        } else {
            // NB! Status is not a simple bit, without atomics it can produce a faulty GetStatus.
            StoreStatus(status);
        }
    }

    enum ThreadStatus GetStatus()
    {
        // Atomic with acquire order reason: data race with flags with dependencies on reads after
        // the load which should become visible
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        uint32_t resInt = fts_.asAtomic.load(std::memory_order_acquire);
        return static_cast<enum ThreadStatus>(resInt >> THREAD_STATUS_OFFSET);
    }

    static PandaString ThreadStatusAsString(enum ThreadStatus status);

    ark::mem::StackFrameAllocator *GetStackFrameAllocator() const
    {
        return stackFrameAllocator_;
    }

    ark::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
    {
        return internalLocalAllocator_;
    }

    mem::TLAB *GetTLAB() const
    {
        ASSERT(tlab_ != nullptr);
        return tlab_;
    }

    void UpdateTLAB(mem::TLAB *tlab);

    void ClearTLAB();

    void SetStringClassPtr(void *p)
    {
        stringClassPtr_ = p;
    }

    void SetArrayU8ClassPtr(void *p)
    {
        arrayU8ClassPtr_ = p;
    }

    void SetArrayU16ClassPtr(void *p)
    {
        arrayU16ClassPtr_ = p;
    }

#ifndef NDEBUG
    bool IsRuntimeCallEnabled() const
    {
        return runtimeCallEnabled_ != 0;
    }
#endif

    static ManagedThread *Create(
        Runtime *runtime, PandaVM *vm,
        ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);

    ~ManagedThread() override;

    explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm,
                           Thread::ThreadType threadType,
                           ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);

    // The methods below are just proxies or caches for the runtime interface

    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
    {
        return preBarrierType_;
    }

    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
    {
        return postBarrierType_;
    }

    // Methods to access thread local storage
    InterpreterCache *GetInterpreterCache()
    {
        return &interpreterCache_;
    }

    uintptr_t GetNativePc() const
    {
        return nativePc_;
    }

    void SetNativePc(uintptr_t pc)
    {
        nativePc_ = pc;
    }

    // Buffers may be destroyed during Detach(), so they should be initialized once more
    void InitBuffers();

    PandaVector<ObjectHeader *> *GetPreBuff() const
    {
        return preBuff_;
    }

    PandaVector<ObjectHeader *> *MovePreBuff()
    {
        auto res = preBuff_;
        preBuff_ = nullptr;
        return res;
    }

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *GetG1PostBarrierBuffer()
    {
        return g1PostBarrierRingBuffer_;
    }
    void ResetG1PostBarrierBuffer()
    {
        g1PostBarrierRingBuffer_ = nullptr;
    }

    static constexpr uint32_t GetG1PostBarrierBufferOffset()
    {
        return MEMBER_OFFSET(ManagedThread, g1PostBarrierRingBuffer_);
    }

    ark::panda_file::SourceLang GetThreadLang() const
    {
        return threadLang_;
    }

    WeightedAdaptiveTlabAverage *GetWeightedTlabAverage() const
    {
        return weightedAdaptiveTlabAverage_;
    }

    PANDA_PUBLIC_API LanguageContext GetLanguageContext();

    inline bool IsSuspended()
    {
        return ReadFlag(SUSPEND_REQUEST);
    }

    inline bool IsRuntimeTerminated()
    {
        return ReadFlag(RUNTIME_TERMINATION_REQUEST);
    }

    inline void SetRuntimeTerminated()
    {
        SetFlag(RUNTIME_TERMINATION_REQUEST);
    }

    static constexpr uint32_t GetFrameKindOffset()
    {
        return MEMBER_OFFSET(ManagedThread, isCompiledFrame_);
    }

    static constexpr uint32_t GetFlagOffset()
    {
        return MEMBER_OFFSET(ManagedThread, fts_);
    }

    static constexpr uint32_t GetEntrypointsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, entrypoints_);
    }

    static constexpr uint32_t GetObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, object_);
    }

    static constexpr uint32_t GetFrameOffset()
    {
        return MEMBER_OFFSET(ManagedThread, frame_);
    }

    static constexpr uint32_t GetExceptionOffset()
    {
        return MEMBER_OFFSET(ManagedThread, exception_);
    }

    static constexpr uint32_t GetNativePcOffset()
    {
        return MEMBER_OFFSET(ManagedThread, nativePc_);
    }

    static constexpr uint32_t GetTLABOffset()
    {
        return MEMBER_OFFSET(ManagedThread, tlab_);
    }

    static constexpr uint32_t GetTlsCardTableAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableAddr_);
    }

    static constexpr uint32_t GetTlsCardTableMinAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableMinAddr_);
    }

    static constexpr uint32_t GetTlsPostWrbOneObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbOneObject_);
    }

    static constexpr uint32_t GetTlsPostWrbTwoObjectsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbTwoObjects_);
    }

    static constexpr uint32_t GetTlsPreWrbEntrypointOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preWrbEntrypoint_);
    }

    static constexpr uint32_t GetTlsStringClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, stringClassPtr_);
    }

    static constexpr uint32_t GetTlsArrayU8ClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, arrayU8ClassPtr_);
    }

    static constexpr uint32_t GetTlsArrayU16ClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, arrayU16ClassPtr_);
    }

    static constexpr uint32_t GetPreBuffOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preBuff_);
    }

    static constexpr uint32_t GetLanguageExtensionsDataOffset()
    {
        return MEMBER_OFFSET(ManagedThread, languageExtensionData_);
    }
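    // Illustrative sketch: the constexpr MEMBER_OFFSET getters above allow generated code to
    // address thread-local slots directly instead of calling accessors. A hypothetical C++
    // equivalent of such an access could look like:
    //
    //     auto base = reinterpret_cast<uintptr_t>(thread);
    //     auto *excSlot = reinterpret_cast<ObjectHeader **>(base + ManagedThread::GetExceptionOffset());
    //     bool pending = (*excSlot != nullptr);  // same observable result as thread->HasPendingException()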
    static constexpr uint32_t GetRuntimeCallEnabledOffset()
    {
#ifndef NDEBUG
        return MEMBER_OFFSET(ManagedThread, runtimeCallEnabled_);
#else
        // it should not be used
        return 0;
#endif
    }

    static constexpr uint32_t GetInterpreterCacheOffset()
    {
        return MEMBER_OFFSET(ManagedThread, interpreterCache_);
    }

    void *GetLanguageExtensionsData() const
    {
        return languageExtensionData_;
    }

    void SetLanguageExtensionsData(void *data)
    {
        languageExtensionData_ = data;
    }

    static constexpr uint32_t GetInternalIdOffset()
    {
        return MEMBER_OFFSET(ManagedThread, internalId_);
    }

    virtual void VisitGCRoots(const ObjectVisitor &cb);

    virtual void UpdateGCRoots();

    PANDA_PUBLIC_API void PushLocalObject(ObjectHeader **objectHeader);

    PANDA_PUBLIC_API void PopLocalObject();

    void SetThreadPriority(int32_t prio);

    uint32_t GetThreadPriority();

    // NO_THREAD_SANITIZE for invalid TSAN data race report
    NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return (fts_.asStruct.flags & static_cast<uint16_t>(flag)) != 0;
    }

    NO_THREAD_SANITIZE bool TestAllFlags() const
    {
        return (fts_.asStruct.flags) != initialThreadFlag_;  // NOLINT(cppcoreguidelines-pro-type-union-access)
    }

    void SetFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.asAtomic.fetch_or(flag, std::memory_order_seq_cst);
    }

    void ClearFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.asAtomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst);
    }

    // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report
    NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe()
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return fts_.asInt;
    }

    bool IsManagedCodeAllowed() const
    {
        return isManagedCodeAllowed_;
    }

    void SetManagedCodeAllowed(bool allowed)
    {
        isManagedCodeAllowed_ = allowed;
    }
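    // Illustrative sketch: the flag accessors above manipulate the shared flag word atomically,
    // e.g. a hypothetical requester could mark a thread and later clear the mark:
    //
    //     thread->SetFlag(SUSPEND_REQUEST);    // fetch_or, seq_cst
    //     bool requested = thread->ReadFlag(SUSPEND_REQUEST);
    //     thread->ClearFlag(SUSPEND_REQUEST);  // fetch_and, seq_cst
    //
    // In practice suspension is requested through SuspendImpl()/ResumeImpl() declared further below.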
    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *PopHandleScope();

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handleScope);

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *GetTopScope() const;

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleStorage<T> *GetHandleStorage() const;

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const;

    PANDA_PUBLIC_API CustomTLSData *GetCustomTLSData(const char *key);
    PANDA_PUBLIC_API void SetCustomTLSData(const char *key, CustomTLSData *data);
    PANDA_PUBLIC_API bool EraseCustomTLSData(const char *key);

#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
    uint32_t RecordMethodEnter()
    {
        return callDepth_++;
    }

    uint32_t RecordMethodExit()
    {
        return --callDepth_;
    }
#endif

    bool IsAttached()
    {
        // Atomic with relaxed order reason: data race with is_attached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        return isAttached_.load(std::memory_order_relaxed);
    }

    void SetAttached()
    {
        // Atomic with relaxed order reason: data race with is_attached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(true, std::memory_order_relaxed);
    }

    void SetDetached()
    {
        // Atomic with relaxed order reason: data race with is_attached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(false, std::memory_order_relaxed);
    }

    bool IsVMThread()
    {
        return isVmThread_;
    }

    void SetVMThread()
    {
        isVmThread_ = true;
    }

    bool IsThrowingOOM()
    {
        return throwingOomCount_ > 0;
    }

    void SetThrowingOOM(bool isThrowingOom)
    {
        if (isThrowingOom) {
            throwingOomCount_++;
            return;
        }
        ASSERT(throwingOomCount_ > 0);
        throwingOomCount_--;
    }

    bool IsUsePreAllocObj()
    {
        return usePreallocObj_;
    }

    void SetUsePreAllocObj(bool usePreallocObj)
    {
        usePreallocObj_ = usePreallocObj;
    }

    PANDA_PUBLIC_API void PrintSuspensionStackIfNeeded();

    ThreadId GetId() const
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints
        // imposed on other reads or writes
        return id_.load(std::memory_order_relaxed);
    }

    void FreeInternalMemory() override;
    void DestroyInternalResources();

    /// Clears the pre/post barrier buffers (and other resources) without deallocation.
    void CleanupInternalResources();

    /// Collect TLAB metrics for memstats
    void CollectTLABMetrics();

    void InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize);
    virtual void DisableStackOverflowCheck();
    virtual void EnableStackOverflowCheck();
    /// Obtains the current thread's native stack parameters and returns true on success
    virtual bool RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize);

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheck();

    static size_t GetStackOverflowCheckOffset()
    {
        return STACK_OVERFLOW_RESERVED_SIZE;
    }

    void *const *GetDebugDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDebugDispatchTable());
#else
        return debugDispatchTable_;
#endif
    }

    void SetDebugDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDebugDispatchTable(true, dispatchTable);
#else
        debugDispatchTable_ = const_cast<void *const *>(dispatchTable);
#endif
    }

    template <bool IS_DEBUG>
    void *const *GetCurrentDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDispatchTable<IS_DEBUG>());
#else
        if constexpr (IS_DEBUG) {
            return debugStubDispatchTable_;
        } else {
            return dispatchTable_;
        }
#endif
    }

    template <bool IS_DEBUG>
    void SetCurrentDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDispatchTable<IS_DEBUG>(true, dispatchTable);
#else
        if constexpr (IS_DEBUG) {
            debugStubDispatchTable_ = const_cast<void *const *>(dispatchTable);
        } else {
            dispatchTable_ = const_cast<void *const *>(dispatchTable);
        }
#endif
    }

    PANDA_PUBLIC_API void SuspendImpl(bool internalSuspend = false);
    PANDA_PUBLIC_API void ResumeImpl(bool internalResume = false);

    virtual void Suspend()
    {
        SuspendImpl();
    }

    virtual void Resume()
    {
        ResumeImpl();
    }

    /// Transition to suspended and back to runnable, re-acquiring a share of mutator_lock_
    PANDA_PUBLIC_API void SuspendCheck();

    bool IsUserSuspended()
    {
        return userCodeSuspendCount_ > 0;
    }

    /* @sync 1
     * @description This synchronization point can be used to insert a new attribute or method
     * into ManagedThread class.
     */
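    // Illustrative sketch (hypothetical controller thread): Suspend()/Resume() calls are expected
    // to be paired, while the target thread parks itself in WaitSuspension() below once it reaches
    // a safepoint:
    //
    //     target->Suspend();   // request suspension; the target blocks at its next safepoint
    //     ...                  // inspect the target's stack, roots, etc.
    //     target->Resume();    // allow the target to leave WaitSuspension()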
    void WaitSuspension()
    {
        constexpr int TIMEOUT = 100;
        auto oldStatus = GetStatus();
        PrintSuspensionStackIfNeeded();
        UpdateStatus(ThreadStatus::IS_SUSPENDED);
        {
            /* @sync 1
             * @description Right after the thread updates its status to IS_SUSPENDED and right before beginning
             * to wait for actual suspension
             */
            os::memory::LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendVar_.TimedWait(&suspendLock_, TIMEOUT);
                // In case the runtime is being terminated, we should abort suspension and release monitors
                if (UNLIKELY(IsRuntimeTerminated())) {
                    suspendLock_.Unlock();
                    OnRuntimeTerminated();
                    UNREACHABLE();
                }
            }
            ASSERT(!IsSuspended());
        }
        UpdateStatus(oldStatus);
    }

    virtual void OnRuntimeTerminated() {}

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        // Do Unlock after StoreStatus, because the thread requesting a suspension should see an updated status
        StoreStatus(status);
        GetMutatorLock()->Unlock();
    }

    PANDA_PUBLIC_API void SafepointPoll();

    /**
     * From NativeCode you can call ManagedCodeBegin.
     * From ManagedCode you can call NativeCodeBegin.
     * Calling Begin for the same code type twice is forbidden.
     */
    virtual void NativeCodeBegin();
    virtual void NativeCodeEnd();
    [[nodiscard]] virtual bool IsInNativeCode() const;

    virtual void ManagedCodeBegin();
    virtual void ManagedCodeEnd();
    [[nodiscard]] virtual bool IsManagedCode() const;

    static bool IsManagedScope()
    {
        auto thread = GetCurrent();
        return thread != nullptr && thread->isManagedScope_;
    }

    [[nodiscard]] bool HasManagedCodeOnStack() const;
    [[nodiscard]] bool HasClearStack() const;
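    // Illustrative sketch (hypothetical call sites): transitions between native and managed code
    // are strictly paired, as described above:
    //
    //     thread->ManagedCodeBegin();  // NATIVE_CODE -> MANAGED_CODE
    //     ...                          // run managed code
    //     thread->ManagedCodeEnd();    // back to NATIVE_CODE
    //
    // Calling Begin for the code type the thread is already in is forbidden.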
protected:
    void ProtectNativeStack();

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheckResult() const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_NATIVE_STACK) {
            if (UNLIKELY(__builtin_frame_address(0) < ToVoidPtr(nativeStackEnd_))) {
                return false;
            }
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_IFRAME_STACK) {
            if (UNLIKELY(GetStackFrameAllocator()->GetAllocatedSize() > iframeStackSize_)) {
                return false;
            }
        }
        return true;
    }

    static const int WAIT_INTERVAL = 10;

    template <typename T = void>
    T *GetAssociatedObject()
    {
        return reinterpret_cast<T *>(object_);
    }

    template <typename T>
    void SetAssociatedObject(T *object)
    {
        object_ = object;
    }

    virtual void InterruptPostImpl() {}

    void UpdateId(ThreadId id)
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints
        // imposed on other reads or writes
        id_.store(id, std::memory_order_relaxed);
    }

    /**
     * Prepares the ManagedThread instance for caching and further reuse by resetting its member variables to
     * their default values.
     */
    virtual void CleanUp();

private:
    enum SafepointFlag : bool { DONT_CHECK_SAFEPOINT = false, CHECK_SAFEPOINT = true };
    enum ReadlockFlag : bool { NO_READLOCK = false, READLOCK = true };

    PandaString LogThreadStack(ThreadState newState) const;

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    template <SafepointFlag SAFEPOINT = DONT_CHECK_SAFEPOINT, ReadlockFlag READLOCK_FLAG = NO_READLOCK>
    void StoreStatus(ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        while (true) {
            union FlagsAndThreadStatus oldFts {};
            union FlagsAndThreadStatus newFts {};
            oldFts.asInt = ReadFlagsAndThreadStatusUnsafe();  // NOLINT(cppcoreguidelines-pro-type-union-access)

            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (SAFEPOINT == CHECK_SAFEPOINT) {  // NOLINT(bugprone-suspicious-semicolon)
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                if (oldFts.asStruct.flags != initialThreadFlag_) {
                    // someone requires a safepoint
                    SafepointPoll();
                    continue;
                }
            }

            newFts.asStruct.flags = oldFts.asStruct.flags;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            newFts.asStruct.status = status;                // NOLINT(cppcoreguidelines-pro-type-union-access)

            // The mutator lock should be acquired before changing the status
            // to avoid blocking in the running state
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (READLOCK_FLAG == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                GetMutatorLock()->ReadLock();
            }

            // clang-format conflicts with CodeCheckAgent, so disable it here
            // clang-format off
            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
            if (fts_.asAtomic.compare_exchange_weak(
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                oldFts.asNonvolatileInt, newFts.asNonvolatileInt, std::memory_order_release)) {
                // If the CAS succeeded, we set the new status and no request occurred here, safe to proceed.
                break;
            }
            // Release the mutator lock to acquire it on the next loop iteration
            // clang-format on
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (READLOCK_FLAG == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                GetMutatorLock()->Unlock();
            }
        }
    }

#ifdef PANDA_WITH_QUICKENER
    NO_OPTIMIZE const void *const *GetOrSetInnerDebugDispatchTable(bool set = false,
                                                                   const void *const *dispatchTable = nullptr) const
    {
        thread_local static const void *const *currentDebugDispatchTable = nullptr;
        if (set) {
            currentDebugDispatchTable = dispatchTable;
        }
        return currentDebugDispatchTable;
    }

    template <bool IS_DEBUG>
    NO_OPTIMIZE const void *const *GetOrSetInnerDispatchTable(bool set = false,
                                                              const void *const *dispatchTable = nullptr) const
    {
        thread_local static const void *const *currentDispatchTable = nullptr;
        if (set) {
            currentDispatchTable = dispatchTable;
        }
        return currentDispatchTable;
    }
#endif

    virtual bool TestLockState() const;

    static constexpr uint32_t THREAD_STATUS_OFFSET = 16;
    static_assert(sizeof(fts_) == sizeof(uint32_t), "Wrong fts_ size");

    // Can cause data races if child thread's UpdateId is executed concurrently with GetNativeThreadId
    std::atomic<ThreadId> id_;

    static mem::TLAB *zeroTlab_;
    PandaVector<ObjectHeader **> localObjects_;
    WeightedAdaptiveTlabAverage *weightedAdaptiveTlabAverage_ {nullptr};

    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
    InterpreterCache interpreterCache_;

    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> customTlsCache_ GUARDED_BY(Locks::customTlsLock_);

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *g1PostBarrierRingBuffer_ {nullptr};
    // Keep these here to speed up interpreter
    mem::BarrierType preBarrierType_ {mem::BarrierType::PRE_WRB_NONE};
    mem::BarrierType postBarrierType_ {mem::BarrierType::POST_WRB_NONE};
    // Thread local storages to avoid locks in heap manager
    mem::StackFrameAllocator *stackFrameAllocator_;
    mem::InternalAllocator<>::LocalSmallObjectAllocator *internalLocalAllocator_;
    std::atomic_bool isAttached_ {false};  // Can be changed after thread is registered and can cause data race
    bool isVmThread_ = false;

    bool isManagedCodeAllowed_ {true};

    size_t throwingOomCount_ {0};
    bool usePreallocObj_ {false};

    ark::panda_file::SourceLang threadLang_ = ark::panda_file::SourceLang::PANDA_ASSEMBLY;

    PandaUniquePtr<tooling::PtThreadInfo> ptThreadInfo_;

    // for stack overflow check
    // |..... Method 1 ....|
    // |..... Method 2 ....|
    // |..... Method 3 ....|_ _ _ native_stack_top
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|_ _ _ native_stack_end
    // |..... Reserved region ....|
    // |.... Protected region ....|_ _ _ native_stack_begin
    // |...... Guard region ......|
    uintptr_t nativeStackBegin_ {0};
    // end of stack for the managed thread; an exception is thrown if the native stack grows over it
    uintptr_t nativeStackEnd_ {0};
    // os thread stack size
    size_t nativeStackSize_ {0};
    // guard region size of the stack
    size_t nativeStackGuardSize_ {0};
    // the reserved region is used for exception handling if a stack overflow happens
    size_t nativeStackReservedSize_ {0};
    // the protected region is for compiled code: a test load of [sp - native_stack_reserved_size_] triggers SEGV
    size_t nativeStackProtectedSize_ {0};
    // max allowed size for an interpreter frame
    size_t iframeStackSize_ {std::numeric_limits<size_t>::max()};

    PandaVector<HandleScope<coretypes::TaggedType> *> taggedHandleScopes_ {};
    HandleStorage<coretypes::TaggedType> *taggedHandleStorage_ {nullptr};
    GlobalHandleStorage<coretypes::TaggedType> *taggedGlobalHandleStorage_ {nullptr};

    PandaVector<HandleScope<ObjectHeader *> *> objectHeaderHandleScopes_ {};
    HandleStorage<ObjectHeader *> *objectHeaderHandleStorage_ {nullptr};

    os::memory::ConditionVariable suspendVar_ GUARDED_BY(suspendLock_);
    os::memory::Mutex suspendLock_;
    uint32_t suspendCount_ GUARDED_BY(suspendLock_) = 0;
    std::atomic_uint32_t userCodeSuspendCount_ {0};

    PandaStack<ThreadState> threadFrameStates_;

    // Boolean which is safe to access after the runtime is destroyed
    bool isManagedScope_ {false};

    friend class ark::test::ThreadTest;
    friend class ark::MTThreadManager;

    // Used in method events
    uint32_t callDepth_ {0};
#ifndef PANDA_WITH_QUICKENER
    void *const *debugDispatchTable_ {nullptr};
    void *const *debugStubDispatchTable_ {nullptr};
    void *const *dispatchTable_ {nullptr};
#endif

    NO_COPY_SEMANTIC(ManagedThread);
    NO_MOVE_SEMANTIC(ManagedThread);
};
}  // namespace ark

#endif  // PANDA_RUNTIME_MANAGED_THREAD_H