/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MANAGED_THREAD_H
#define PANDA_RUNTIME_MANAGED_THREAD_H

#include "thread.h"

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_MANAGED_CODE() ASSERT(::ark::ManagedThread::GetCurrent()->IsManagedCode())
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_NATIVE_CODE() ASSERT(::ark::ManagedThread::GetCurrent()->IsInNativeCode())
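
// Illustrative use of the assertion macros above (a sketch; these call sites are not part of
// this header): a function that must run only in managed mode could begin with
//   ASSERT_MANAGED_CODE();
// while a function reachable only from native code could begin with
//   ASSERT_NATIVE_CODE();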

namespace ark {
class MTThreadManager;
/**
 * @brief Class represents managed thread
 *
 * When the thread is created it registers itself in the runtime, so
 * the runtime knows about all managed threads at any given time.
 *
 * This class should be used to store thread-specific information that
 * is necessary to execute managed code:
 *  - Frame
 *  - Exception
 *  - Interpreter cache
 *  - etc.
 *
 *  Currently the interpreter uses it only to store the current frame.
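 *
 * Illustrative usage (a sketch based only on the accessors declared below; exact call sites
 * depend on the runtime):
 * @code
 * ark::ManagedThread *thread = ark::ManagedThread::GetCurrent();
 * if (thread != nullptr && !thread->HasPendingException()) {
 *     Frame *frame = thread->GetCurrentFrame();  // interpreter frame stored on the thread
 * }
 * @endcode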
 */
class ManagedThread : public Thread {
public:
    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };

    using NativeHandleType = os::thread::NativeHandleType;
    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;
    static constexpr size_t STACK_MAX_SIZE_OVERFLOW_CHECK = 256_MB;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 12_KB;
#endif
    static constexpr size_t STACK_OVERFLOW_PROTECTED_SIZE = 4_KB;

    void SetLanguageContext([[maybe_unused]] const LanguageContext &ctx)
    {
        // Deprecated method, don't use it. Kept only for compatibility with js_runtime.
    }

    void SetCurrentFrame(Frame *f)
    {
        frame_ = f;
    }

    tooling::PtThreadInfo *GetPtThreadInfo() const
    {
        return ptThreadInfo_.get();
    }

    Frame *GetCurrentFrame() const
    {
        return frame_;
    }

    void *GetFrame() const
    {
        void *fp = GetCurrentFrame();
        if (IsCurrentFrameCompiled()) {
            return (StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp))
                       ? (StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(fp))
                       : fp;
        }
        return fp;
    }
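
    // Note: GetFrame() above unwinds one step when the stored frame is an interpreter/compiler
    // boundary frame, so its callers receive the previous compiled frame rather than the
    // boundary marker itself.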

    bool IsCurrentFrameCompiled() const
    {
        return isCompiledFrame_;
    }

    void SetCurrentFrameIsCompiled(bool value)
    {
        isCompiledFrame_ = value;
    }

    void SetException(ObjectHeader *exception)
    {
        exception_ = exception;
    }

    ObjectHeader *GetException() const
    {
        return exception_;
    }

    bool HasPendingException() const
    {
        return exception_ != nullptr;
    }

    void ClearException()
    {
        exception_ = nullptr;
    }

    size_t GetIFrameStackSize() const
    {
        return iframeStackSize_;
    }

    static bool ThreadIsManagedThread(const Thread *thread)
    {
        ASSERT(thread != nullptr);
        Thread::ThreadType threadType = thread->GetThreadType();
        return threadType == Thread::ThreadType::THREAD_TYPE_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_MT_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_TASK;
    }

    static ManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsManagedThread(thread));
        return static_cast<ManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
     * It can be used in hotspots to get the best performance.
     * We can only use this method in places where the ManagedThread is known to exist.
     * @return pointer to ManagedThread
     */
    static ManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current ManagedThread.
     * @return pointer to ManagedThread or nullptr (if the current thread is not a managed thread)
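     *
     * Illustrative contrast with GetCurrentRaw (a sketch, not a prescribed pattern):
     * @code
     * // Safe: returns nullptr when the current thread is not a managed thread.
     * if (auto *mt = ark::ManagedThread::GetCurrent()) {
     *     mt->ClearException();
     * }
     * // Unsafe but faster: valid only where a ManagedThread is known to exist.
     * ark::ManagedThread *hot = ark::ManagedThread::GetCurrentRaw();
     * @endcode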
     */
    PANDA_PUBLIC_API static ManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsManagedThread(thread)) {
            return CastFromThread(thread);
        }
        return nullptr;
    }

    static void Initialize();

    static void Shutdown();

    static PandaString ThreadStatusAsString(enum ThreadStatus status);

    ark::mem::StackFrameAllocator *GetStackFrameAllocator() const
    {
        return stackFrameAllocator_;
    }

    ark::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
    {
        return internalLocalAllocator_;
    }

    mem::TLAB *GetTLAB() const
    {
        ASSERT(tlab_ != nullptr);
        return tlab_;
    }

    void UpdateTLAB(mem::TLAB *tlab);

    void ClearTLAB();

    void SetStringClassPtr(void *p)
    {
        stringClassPtr_ = p;
    }

    void SetArrayU8ClassPtr(void *p)
    {
        arrayU8ClassPtr_ = p;
    }

    void SetArrayU16ClassPtr(void *p)
    {
        arrayU16ClassPtr_ = p;
    }

#ifndef NDEBUG
    bool IsRuntimeCallEnabled() const
    {
        return runtimeCallEnabled_ != 0;
    }
#endif

    static ManagedThread *Create(Runtime *runtime, PandaVM *vm,
                                 ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);
    ~ManagedThread() override;

    explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm, Thread::ThreadType threadType,
                           ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);

    // The methods below are just proxies or caches for the runtime interface

    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
    {
        return preBarrierType_;
    }

    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
    {
        return postBarrierType_;
    }

    // Methods to access thread local storage
    InterpreterCache *GetInterpreterCache()
    {
        return &interpreterCache_;
    }

    uintptr_t GetNativePc() const
    {
        return nativePc_;
    }

    void SetNativePc(uintptr_t pc)
    {
        nativePc_ = pc;
    }

    // Buffers may be destroyed during Detach(), so they should be initialized once more
    void InitBuffers();

    PandaVector<ObjectHeader *> *GetPreBuff() const
    {
        return preBuff_;
    }

    PandaVector<ObjectHeader *> *MovePreBuff()
    {
        auto res = preBuff_;
        preBuff_ = nullptr;
        return res;
    }

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *GetG1PostBarrierBuffer()
    {
        return g1PostBarrierRingBuffer_;
    }

    void ResetG1PostBarrierBuffer()
    {
        g1PostBarrierRingBuffer_ = nullptr;
    }

    static constexpr uint32_t GetG1PostBarrierBufferOffset()
    {
        return MEMBER_OFFSET(ManagedThread, g1PostBarrierRingBuffer_);
    }

    ark::panda_file::SourceLang GetThreadLang() const
    {
        return threadLang_;
    }

    WeightedAdaptiveTlabAverage *GetWeightedTlabAverage() const
    {
        return weightedAdaptiveTlabAverage_;
    }

    PANDA_PUBLIC_API LanguageContext GetLanguageContext();

    static constexpr uint32_t GetFrameKindOffset()
    {
        return MEMBER_OFFSET(ManagedThread, isCompiledFrame_);
    }
    static constexpr uint32_t GetFlagOffset()
    {
        return ThreadProxy::GetFlagOffset();
    }

    static constexpr uint32_t GetEntrypointsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, entrypoints_);
    }
    static constexpr uint32_t GetObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, object_);
    }
    static constexpr uint32_t GetFrameOffset()
    {
        return MEMBER_OFFSET(ManagedThread, frame_);
    }
    static constexpr uint32_t GetExceptionOffset()
    {
        return MEMBER_OFFSET(ManagedThread, exception_);
    }
    static constexpr uint32_t GetNativePcOffset()
    {
        return MEMBER_OFFSET(ManagedThread, nativePc_);
    }
    static constexpr uint32_t GetTLABOffset()
    {
        return MEMBER_OFFSET(ManagedThread, tlab_);
    }
    static constexpr uint32_t GetTlsCardTableAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableAddr_);
    }
    static constexpr uint32_t GetTlsCardTableMinAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableMinAddr_);
    }
    static constexpr uint32_t GetTlsPostWrbOneObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbOneObject_);
    }
    static constexpr uint32_t GetTlsPostWrbTwoObjectsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbTwoObjects_);
    }
    static constexpr uint32_t GetTlsPreWrbEntrypointOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preWrbEntrypoint_);
    }
    static constexpr uint32_t GetTlsStringClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, stringClassPtr_);
    }
    static constexpr uint32_t GetTlsArrayU8ClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, arrayU8ClassPtr_);
    }
    static constexpr uint32_t GetTlsArrayU16ClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, arrayU16ClassPtr_);
    }
    static constexpr uint32_t GetPreBuffOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preBuff_);
    }

    static constexpr uint32_t GetLanguageExtensionsDataOffset()
    {
        return MEMBER_OFFSET(ManagedThread, languageExtensionData_);
    }

    static constexpr uint32_t GetRuntimeCallEnabledOffset()
    {
#ifndef NDEBUG
        return MEMBER_OFFSET(ManagedThread, runtimeCallEnabled_);
#else
        // it should not be used
        return 0;
#endif
    }

    static constexpr uint32_t GetInterpreterCacheOffset()
    {
        return MEMBER_OFFSET(ManagedThread, interpreterCache_);
    }

    void *GetLanguageExtensionsData() const
    {
        return languageExtensionData_;
    }

    void SetLanguageExtensionsData(void *data)
    {
        languageExtensionData_ = data;
    }

    static constexpr uint32_t GetInternalIdOffset()
    {
        return MEMBER_OFFSET(ManagedThread, internalId_);
    }
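
    // Illustrative reason the constexpr offset accessors above exist (a sketch, not stated in
    // this header): code generators and assembly bridges address thread-local fields relative to
    // the ManagedThread pointer, conceptually
    //   auto *frame = *reinterpret_cast<Frame **>(reinterpret_cast<uintptr_t>(thread) + ManagedThread::GetFrameOffset());
    // so the returned values must stay in sync with the member layout declared below.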

    virtual void VisitGCRoots(const ObjectVisitor &cb);

    virtual void UpdateGCRoots(const GCRootUpdater &gcRootUpdater);

    PANDA_PUBLIC_API void PushLocalObject(ObjectHeader **objectHeader);

    PANDA_PUBLIC_API void PopLocalObject();

    void SetThreadPriority(int32_t prio);

    uint32_t GetThreadPriority();

    bool IsManagedCodeAllowed() const
    {
        return isManagedCodeAllowed_;
    }

    void SetManagedCodeAllowed(bool allowed)
    {
        isManagedCodeAllowed_ = allowed;
    }

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *PopHandleScope();

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handleScope);

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *GetTopScope() const;

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleStorage<T> *GetHandleStorage() const;

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const;

    PANDA_PUBLIC_API CustomTLSData *GetCustomTLSData(const char *key);
    PANDA_PUBLIC_API void SetCustomTLSData(const char *key, CustomTLSData *data);
    PANDA_PUBLIC_API bool EraseCustomTLSData(const char *key);

#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
    uint32_t RecordMethodEnter()
    {
        return callDepth_++;
    }

    uint32_t RecordMethodExit()
    {
        return --callDepth_;
    }
#endif

    bool IsAttached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        return isAttached_.load(std::memory_order_relaxed);
    }

    void SetAttached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(true, std::memory_order_relaxed);
    }

    void SetDetached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(false, std::memory_order_relaxed);
    }

    bool IsVMThread()
    {
        return isVmThread_;
    }

    void SetVMThread()
    {
        isVmThread_ = true;
    }

    bool IsThrowingOOM()
    {
        return throwingOomCount_ > 0;
    }

    void SetThrowingOOM(bool isThrowingOom)
    {
        if (isThrowingOom) {
            throwingOomCount_++;
            return;
        }
        ASSERT(throwingOomCount_ > 0);
        throwingOomCount_--;
    }

    bool IsUsePreAllocObj()
    {
        return usePreallocObj_;
    }

    void SetUsePreAllocObj(bool usePreallocObj)
    {
        usePreallocObj_ = usePreallocObj;
    }

    PANDA_PUBLIC_API void PrintSuspensionStackIfNeeded() override;

    ThreadId GetId() const override
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints imposed
        // on other reads or writes
        return id_.load(std::memory_order_relaxed);
    }

    void FreeInternalMemory() override;
    void DestroyInternalResources();

    /// Clears the pre/post barrier buffers (and other resources) without deallocation.
    void CleanupInternalResources();

    /// Collect TLAB metrics for memstats
    void CollectTLABMetrics();

    void InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize);
    virtual void DisableStackOverflowCheck();
    virtual void EnableStackOverflowCheck();
    /// Obtains current thread's native stack parameters and returns true on success
    virtual bool RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize);

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheck();

    static size_t GetStackOverflowCheckOffset()
    {
        return STACK_OVERFLOW_RESERVED_SIZE;
    }

    void *const *GetDebugDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDebugDispatchTable());
#else
        return debugDispatchTable_;
#endif
    }

    void SetDebugDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDebugDispatchTable(true, dispatchTable);
#else
        debugDispatchTable_ = const_cast<void *const *>(dispatchTable);
#endif
    }

    template <bool IS_DEBUG>
    void *const *GetCurrentDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDispatchTable<IS_DEBUG>());
#else
        if constexpr (IS_DEBUG) {
            return debugStubDispatchTable_;
        } else {
            return dispatchTable_;
        }
#endif
    }

    template <bool IS_DEBUG>
    void SetCurrentDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDispatchTable<IS_DEBUG>(true, dispatchTable);
#else
        if constexpr (IS_DEBUG) {
            debugStubDispatchTable_ = const_cast<void *const *>(dispatchTable);
        } else {
            dispatchTable_ = const_cast<void *const *>(dispatchTable);
        }
#endif
    }

    virtual void Suspend()
    {
        SuspendImpl();
    }

    virtual void Resume()
    {
        ResumeImpl();
    }

    /**
     * From NativeCode you can call ManagedCodeBegin.
     * From ManagedCode you can call NativeCodeBegin.
     * Calling the begin method of the state you are already in is forbidden.
     */
    virtual void NativeCodeBegin();
    virtual void NativeCodeEnd();
    [[nodiscard]] virtual bool IsInNativeCode() const;

    virtual void ManagedCodeBegin();
    virtual void ManagedCodeEnd();
    [[nodiscard]] virtual bool IsManagedCode() const;
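
    // Illustrative transition sketch (assumed usage based on the comment above, not a prescribed
    // pattern from this header):
    //   thread->ManagedCodeBegin();   // NATIVE_CODE -> MANAGED_CODE
    //   /* execute managed code */
    //   thread->ManagedCodeEnd();     // MANAGED_CODE -> NATIVE_CODE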

    static bool IsManagedScope()
    {
        auto thread = GetCurrent();
        return thread != nullptr && thread->isManagedScope_;
    }

    [[nodiscard]] bool HasManagedCodeOnStack() const;
    [[nodiscard]] bool HasClearStack() const;

protected:
    void ProtectNativeStack();

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheckResult() const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_NATIVE_STACK) {
            if (UNLIKELY(__builtin_frame_address(0) < ToVoidPtr(nativeStackEnd_))) {
                return false;
            }
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_IFRAME_STACK) {
            if (UNLIKELY(GetStackFrameAllocator()->GetAllocatedSize() > iframeStackSize_)) {
                return false;
            }
        }
        return true;
    }
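
    // StackOverflowCheckResult() above performs two independent checks: the native check compares
    // the current frame address with nativeStackEnd_ (see the stack layout sketched near the
    // native stack fields below), while the iframe check bounds the total memory handed out by
    // the interpreter's stack frame allocator against iframeStackSize_.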

    static const int WAIT_INTERVAL = 10;

    template <typename T = void>
    T *GetAssociatedObject()
    {
        return reinterpret_cast<T *>(object_);
    }

    template <typename T>
    void SetAssociatedObject(T *object)
    {
        object_ = object;
    }

    virtual void InterruptPostImpl() {}

    void UpdateId(ThreadId id)
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints imposed
        // on other reads or writes
        id_.store(id, std::memory_order_relaxed);
    }

    /**
     * Prepares the ManagedThread instance for caching and further reuse by resetting its member variables to their
     * default values.
     */
    virtual void CleanUp();

private:
    PandaString LogThreadStack(ThreadState newState) const;

#ifdef PANDA_WITH_QUICKENER
    NO_OPTIMIZE const void *const *GetOrSetInnerDebugDispatchTable(bool set = false,
                                                                   const void *const *dispatch_table = nullptr) const
    {
        thread_local static const void *const *current_debug_dispatch_table = nullptr;
        if (set) {
            current_debug_dispatch_table = dispatch_table;
        }
        return current_debug_dispatch_table;
    }

    template <bool IS_DEBUG>
    NO_OPTIMIZE const void *const *GetOrSetInnerDispatchTable(bool set = false,
                                                              const void *const *dispatch_table = nullptr) const
    {
        thread_local static const void *const *current_dispatch_table = nullptr;
        if (set) {
            current_dispatch_table = dispatch_table;
        }
        return current_dispatch_table;
    }
#endif

    virtual bool TestLockState() const;

    // Can cause data races if child thread's UpdateId is executed concurrently with GetNativeThreadId
    std::atomic<ThreadId> id_;

    static mem::TLAB *zeroTlab_;
    PandaVector<ObjectHeader **> localObjects_;
    WeightedAdaptiveTlabAverage *weightedAdaptiveTlabAverage_ {nullptr};

    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
    InterpreterCache interpreterCache_;

    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> customTlsCache_ GUARDED_BY(Locks::customTlsLock_);

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *g1PostBarrierRingBuffer_ {nullptr};
    // Keep these here to speed up interpreter
    mem::BarrierType preBarrierType_ {mem::BarrierType::PRE_WRB_NONE};
    mem::BarrierType postBarrierType_ {mem::BarrierType::POST_WRB_NONE};
    // Thread local storages to avoid locks in heap manager
    mem::StackFrameAllocator *stackFrameAllocator_;
    mem::InternalAllocator<>::LocalSmallObjectAllocator *internalLocalAllocator_;
    std::atomic_bool isAttached_ {false};  // Can be changed after thread is registered and can cause data race
    bool isVmThread_ = false;

    bool isManagedCodeAllowed_ {true};

    size_t throwingOomCount_ {0};
    bool usePreallocObj_ {false};

    ark::panda_file::SourceLang threadLang_ = ark::panda_file::SourceLang::PANDA_ASSEMBLY;

    PandaUniquePtr<tooling::PtThreadInfo> ptThreadInfo_;

    // for stack overflow check
    // |.....     Method 1    ....|
    // |.....     Method 2    ....|
    // |.....     Method 3    ....|_ _ _ native_stack_top
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|_ _ _ native_stack_end
    // |..... Reserved region ....|
    // |.... Protected region ....|_ _ _ native_stack_begin
    // |...... Guard region ......|
    uintptr_t nativeStackBegin_ {0};
    // end of stack for managed thread; an exception is thrown if the native stack grows beyond it
    uintptr_t nativeStackEnd_ {0};
    // os thread stack size
    size_t nativeStackSize_ {0};
    // guard region size of stack
    size_t nativeStackGuardSize_ {0};
    // reserved region is for handling the thrown exception if a stack overflow happens
    size_t nativeStackReservedSize_ {0};
    // protected region is for compiled code to test load [sp - native_stack_reserved_size_] to trigger segv
    size_t nativeStackProtectedSize_ {0};
    // max allowed total size of interpreter frames
    size_t iframeStackSize_ {std::numeric_limits<size_t>::max()};

    PandaVector<HandleScope<coretypes::TaggedType> *> taggedHandleScopes_ {};
    HandleStorage<coretypes::TaggedType> *taggedHandleStorage_ {nullptr};
    GlobalHandleStorage<coretypes::TaggedType> *taggedGlobalHandleStorage_ {nullptr};

    PandaVector<HandleScope<ObjectHeader *> *> objectHeaderHandleScopes_ {};
    HandleStorage<ObjectHeader *> *objectHeaderHandleStorage_ {nullptr};

    PandaStack<ThreadState> threadFrameStates_;

    // Boolean which is safe to access after runtime is destroyed
    bool isManagedScope_ {false};

    friend class ark::test::ThreadTest;
    friend class ark::MTThreadManager;

    // Used in method events
    uint32_t callDepth_ {0};
#ifndef PANDA_WITH_QUICKENER
    void *const *debugDispatchTable_ {nullptr};
    void *const *debugStubDispatchTable_ {nullptr};
    void *const *dispatchTable_ {nullptr};
#endif

    NO_COPY_SEMANTIC(ManagedThread);
    NO_MOVE_SEMANTIC(ManagedThread);
};
}  // namespace ark

#endif  // PANDA_RUNTIME_MANAGED_THREAD_H