/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MANAGED_THREAD_H
#define PANDA_RUNTIME_MANAGED_THREAD_H

#include "thread.h"

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_MANAGED_CODE() ASSERT(::ark::ManagedThread::GetCurrent()->IsManagedCode())
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_NATIVE_CODE() ASSERT(::ark::ManagedThread::GetCurrent()->IsInNativeCode())
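// Usage sketch (editor's illustration, not part of the original header): these macros are meant to
// guard runtime entry points; the function below is hypothetical.
//
//     void SomeRuntimeEntrypoint()
//     {
//         ASSERT_MANAGED_CODE();  // caller must already be executing managed code
//         // ... work that may allocate objects or throw managed exceptions ...
//     }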

namespace ark {
class MTThreadManager;
/**
 * @brief Class represents a managed thread
 *
 * When the thread is created it registers itself in the runtime, so the
 * runtime knows about all managed threads at any given time.
 *
 * This class should be used to store thread-specific information that
 * is necessary to execute managed code:
 *  - Frame
 *  - Exception
 *  - Interpreter cache
 *  - etc.
 *
 *  Currently it is used by the interpreter to store the current frame only.
 */
class ManagedThread : public Thread {
public:
    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };

    using NativeHandleType = os::thread::NativeHandleType;
    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;
    static constexpr size_t STACK_MAX_SIZE_OVERFLOW_CHECK = 256_MB;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 12_KB;
#endif
    static constexpr size_t STACK_OVERFLOW_PROTECTED_SIZE = 4_KB;

    void SetLanguageContext([[maybe_unused]] const LanguageContext &ctx)
    {
        // Deprecated method, don't use it. Kept only for compatibility with js_runtime.
    }

    void SetCurrentFrame(Frame *f)
    {
        frame_ = f;
    }

    tooling::PtThreadInfo *GetPtThreadInfo() const
    {
        return ptThreadInfo_.get();
    }

    Frame *GetCurrentFrame() const
    {
        return frame_;
    }

    void *GetFrame() const
    {
        void *fp = GetCurrentFrame();
        if (IsCurrentFrameCompiled()) {
            return (StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp))
                       ? (StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(fp))
                       : fp;
        }
        return fp;
    }

    bool IsCurrentFrameCompiled() const
    {
        return isCompiledFrame_;
    }

    void SetCurrentFrameIsCompiled(bool value)
    {
        isCompiledFrame_ = value;
    }

    void SetException(ObjectHeader *exception)
    {
        exception_ = exception;
    }

    ObjectHeader *GetException() const
    {
        return exception_;
    }

    bool HasPendingException() const
    {
        return exception_ != nullptr;
    }

    void ClearException()
    {
        exception_ = nullptr;
    }
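    /**
     * Editor's note: a minimal sketch of the pending-exception pattern these accessors support;
     * the calling context is hypothetical.
     * @code
     *   ManagedThread *thread = ManagedThread::GetCurrent();
     *   if (thread->HasPendingException()) {
     *       ObjectHeader *exc = thread->GetException();
     *       thread->ClearException();
     *       // ... hand `exc` over to the language-specific unwinding / handler logic ...
     *   }
     * @endcode
     */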

    size_t GetIFrameStackSize() const
    {
        return iframeStackSize_;
    }

    static bool ThreadIsManagedThread(const Thread *thread)
    {
        ASSERT(thread != nullptr);
        Thread::ThreadType threadType = thread->GetThreadType();
        return threadType == Thread::ThreadType::THREAD_TYPE_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_MT_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_TASK;
    }

    static ManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsManagedThread(thread));
        return static_cast<ManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
     * It can be used in hotspots to get the best performance.
     * We can only use this method in places where the ManagedThread is known to exist.
     * @return pointer to ManagedThread
     */
    static ManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current ManagedThread.
     * @return pointer to ManagedThread or nullptr (if the current thread is not a managed thread)
     */
    PANDA_PUBLIC_API static ManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsManagedThread(thread)) {
            return CastFromThread(thread);
        }
        return nullptr;
    }
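    /**
     * Editor's note: illustrative sketch of when to prefer each accessor (hypothetical call sites):
     * @code
     *   // Hot path inside the interpreter loop: the thread is known to be managed.
     *   ManagedThread *fast = ManagedThread::GetCurrentRaw();
     *
     *   // Code reachable from arbitrary (e.g. GC or compiler) threads: check for nullptr first.
     *   if (ManagedThread *safe = ManagedThread::GetCurrent()) {
     *       // ... only then touch managed-thread state ...
     *   }
     * @endcode
     */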

    static void Initialize();

    static void Shutdown();

    bool IsThreadAlive()
    {
        return GetStatus() != ThreadStatus::FINISHED;
    }

    void UpdateStatus(enum ThreadStatus status)
    {
        ASSERT(ManagedThread::GetCurrent() == this);

        ThreadStatus oldStatus = GetStatus();
        if (oldStatus == ThreadStatus::RUNNING && status != ThreadStatus::RUNNING) {
            TransitionFromRunningToSuspended(status);
        } else if (oldStatus != ThreadStatus::RUNNING && status == ThreadStatus::RUNNING) {
            // NB! This thread is treated as suspended, so when we transition from the suspended state to
            // running we need to check the suspension flag and counter, so SafepointPoll has to be done before
            // acquiring the mutator_lock.
            // StoreStatus acquires the lock here
            StoreStatus<CHECK_SAFEPOINT, READLOCK>(ThreadStatus::RUNNING);
        } else if (oldStatus == ThreadStatus::NATIVE && status != ThreadStatus::IS_TERMINATED_LOOP &&
                   IsRuntimeTerminated()) {
            // If a daemon thread with NATIVE status was deregistered, it should not access any managed object,
            // i.e. change its status from NATIVE, because such an object may already be deleted by the runtime.
            // In case its status is changed, we must call a Safepoint to terminate this thread.
            // For example, if a daemon thread calls ManagedCodeBegin (which changes status from NATIVE to
            // RUNNING), it may be interrupted by a GC thread, which changes status to IS_SUSPENDED.
            StoreStatus<CHECK_SAFEPOINT>(status);
        } else {
            // NB! Status is not a simple bit; without atomics it can produce a faulty GetStatus.
            StoreStatus(status);
        }
    }
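    /**
     * Editor's note: a typical native-call round trip expressed through UpdateStatus (sketch only; the
     * surrounding call is hypothetical, and real code normally goes through NativeCodeBegin()/
     * ManagedCodeBegin() declared below rather than calling UpdateStatus() directly):
     * @code
     *   thread->UpdateStatus(ThreadStatus::NATIVE);   // leave managed code, GC may proceed
     *   CallIntoNativeLibrary();                      // hypothetical native work
     *   thread->UpdateStatus(ThreadStatus::RUNNING);  // re-enter managed code, may block on a safepoint
     * @endcode
     */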

    enum ThreadStatus GetStatus()
    {
        // Atomic with acquire order reason: data race with flags with dependencies on reads after
        // the load which should become visible
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        uint32_t resInt = fts_.asAtomic.load(std::memory_order_acquire);
        return static_cast<enum ThreadStatus>(resInt >> THREAD_STATUS_OFFSET);
    }

    static PandaString ThreadStatusAsString(enum ThreadStatus status);

    ark::mem::StackFrameAllocator *GetStackFrameAllocator() const
    {
        return stackFrameAllocator_;
    }

    ark::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
    {
        return internalLocalAllocator_;
    }

    mem::TLAB *GetTLAB() const
    {
        ASSERT(tlab_ != nullptr);
        return tlab_;
    }

    void UpdateTLAB(mem::TLAB *tlab);

    void ClearTLAB();
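    /**
     * Editor's note: the TLAB (thread-local allocation buffer) lets the allocation fast path bump-allocate
     * without taking heap locks. Sketch of the intended flow; the helper below is hypothetical and the real
     * bump-allocation interface lives in mem::TLAB / the heap manager, not in this header:
     * @code
     *   ManagedThread *thread = ManagedThread::GetCurrentRaw();
     *   void *mem = TryTlabBumpAlloc(thread->GetTLAB(), size);  // hypothetical helper
     *   if (mem == nullptr) {
     *       // slow path: the heap manager refills the buffer via UpdateTLAB() and retries
     *   }
     * @endcode
     */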
230 
SetStringClassPtr(void * p)231     void SetStringClassPtr(void *p)
232     {
233         stringClassPtr_ = p;
234     }
235 
SetArrayU16ClassPtr(void * p)236     void SetArrayU16ClassPtr(void *p)
237     {
238         arrayU16ClassPtr_ = p;
239     }
240 
241 #ifndef NDEBUG
IsRuntimeCallEnabled()242     bool IsRuntimeCallEnabled() const
243     {
244         return runtimeCallEnabled_ != 0;
245     }
246 #endif
247 
248     static ManagedThread *Create(Runtime *runtime, PandaVM *vm,
249                                  ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);
250     ~ManagedThread() override;
251 
252     explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm, Thread::ThreadType threadType,
253                            ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);

    // The methods below are just proxies or caches for the runtime interface

    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
    {
        return preBarrierType_;
    }

    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
    {
        return postBarrierType_;
    }

    // Methods to access thread local storage
    InterpreterCache *GetInterpreterCache()
    {
        return &interpreterCache_;
    }

    uintptr_t GetNativePc() const
    {
        return nativePc_;
    }

    void SetNativePc(uintptr_t pc)
    {
        nativePc_ = pc;
    }

    // buffers may be destroyed during Detach(), so they should be initialized once more
    void InitBuffers();

    PandaVector<ObjectHeader *> *GetPreBuff() const
    {
        return preBuff_;
    }

    PandaVector<ObjectHeader *> *MovePreBuff()
    {
        auto res = preBuff_;
        preBuff_ = nullptr;
        return res;
    }

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *GetG1PostBarrierBuffer()
    {
        return g1PostBarrierRingBuffer_;
    }

    void ResetG1PostBarrierBuffer()
    {
        g1PostBarrierRingBuffer_ = nullptr;
    }

    static constexpr uint32_t GetG1PostBarrierBufferOffset()
    {
        return MEMBER_OFFSET(ManagedThread, g1PostBarrierRingBuffer_);
    }

    ark::panda_file::SourceLang GetThreadLang() const
    {
        return threadLang_;
    }

    WeightedAdaptiveTlabAverage *GetWeightedTlabAverage() const
    {
        return weightedAdaptiveTlabAverage_;
    }

    PANDA_PUBLIC_API LanguageContext GetLanguageContext();

    inline bool IsSuspended()
    {
        return ReadFlag(SUSPEND_REQUEST);
    }

    inline bool IsRuntimeTerminated()
    {
        return ReadFlag(RUNTIME_TERMINATION_REQUEST);
    }

    inline void SetRuntimeTerminated()
    {
        SetFlag(RUNTIME_TERMINATION_REQUEST);
    }

    static constexpr uint32_t GetFrameKindOffset()
    {
        return MEMBER_OFFSET(ManagedThread, isCompiledFrame_);
    }
    static constexpr uint32_t GetFlagOffset()
    {
        return MEMBER_OFFSET(ManagedThread, fts_);
    }

    static constexpr uint32_t GetEntrypointsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, entrypoints_);
    }
    static constexpr uint32_t GetObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, object_);
    }
    static constexpr uint32_t GetFrameOffset()
    {
        return MEMBER_OFFSET(ManagedThread, frame_);
    }
    static constexpr uint32_t GetExceptionOffset()
    {
        return MEMBER_OFFSET(ManagedThread, exception_);
    }
    static constexpr uint32_t GetNativePcOffset()
    {
        return MEMBER_OFFSET(ManagedThread, nativePc_);
    }
    static constexpr uint32_t GetTLABOffset()
    {
        return MEMBER_OFFSET(ManagedThread, tlab_);
    }
    static constexpr uint32_t GetTlsCardTableAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableAddr_);
    }
    static constexpr uint32_t GetTlsCardTableMinAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableMinAddr_);
    }
    static constexpr uint32_t GetTlsPostWrbOneObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbOneObject_);
    }
    static constexpr uint32_t GetTlsPostWrbTwoObjectsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbTwoObjects_);
    }
    static constexpr uint32_t GetTlsPreWrbEntrypointOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preWrbEntrypoint_);
    }
    static constexpr uint32_t GetTlsStringClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, stringClassPtr_);
    }
    static constexpr uint32_t GetTlsArrayU16ClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, arrayU16ClassPtr_);
    }
    static constexpr uint32_t GetPreBuffOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preBuff_);
    }
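    /**
     * Editor's note: these constexpr Get*Offset() helpers exist so that generated code (JIT/AOT entrypoints
     * and the assembly interpreter) can address thread-local fields as [thread_reg + offset] instead of
     * calling accessors. A hedged illustration of the idea, in pseudo-assembly:
     * @code
     *   // load the pending exception of the current thread
     *   // ldr x0, [THREAD_REG, #ManagedThread::GetExceptionOffset()]
     * @endcode
     */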

    static constexpr uint32_t GetLanguageExtensionsDataOffset()
    {
        return MEMBER_OFFSET(ManagedThread, languageExtensionData_);
    }

    static constexpr uint32_t GetRuntimeCallEnabledOffset()
    {
#ifndef NDEBUG
        return MEMBER_OFFSET(ManagedThread, runtimeCallEnabled_);
#else
        // it should not be used
        return 0;
#endif
    }

    static constexpr uint32_t GetInterpreterCacheOffset()
    {
        return MEMBER_OFFSET(ManagedThread, interpreterCache_);
    }

    void *GetLanguageExtensionsData() const
    {
        return languageExtensionData_;
    }

    void SetLanguageExtensionsData(void *data)
    {
        languageExtensionData_ = data;
    }

    static constexpr uint32_t GetInternalIdOffset()
    {
        return MEMBER_OFFSET(ManagedThread, internalId_);
    }

    virtual void VisitGCRoots(const ObjectVisitor &cb);

    virtual void UpdateGCRoots();

    PANDA_PUBLIC_API void PushLocalObject(ObjectHeader **objectHeader);

    PANDA_PUBLIC_API void PopLocalObject();

    void SetThreadPriority(int32_t prio);

    uint32_t GetThreadPriority();

    // NO_THREAD_SANITIZE for invalid TSAN data race report
    NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return (fts_.asStruct.flags & static_cast<uint16_t>(flag)) != 0;
    }

    NO_THREAD_SANITIZE bool TestAllFlags() const
    {
        return (fts_.asStruct.flags) != initialThreadFlag_;  // NOLINT(cppcoreguidelines-pro-type-union-access)
    }

    void SetFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.asAtomic.fetch_or(flag, std::memory_order_seq_cst);
    }

    void ClearFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.asAtomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst);
    }
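    /**
     * Editor's note: the flags share one 32-bit word with the thread status (see StoreStatus() below), so a
     * flag raised by another thread is noticed on the next status transition. Sketch of how the flag API
     * composes with the accessors above; illustration only, suspension is normally requested through
     * SuspendImpl()/ResumeImpl() declared later in this class:
     * @code
     *   thread->SetFlag(SUSPEND_REQUEST);
     *   if (thread->IsSuspended()) {          // ReadFlag(SUSPEND_REQUEST)
     *       // ... the target thread will park in WaitSuspension() at its next safepoint ...
     *   }
     *   thread->ClearFlag(SUSPEND_REQUEST);
     * @endcode
     */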

    // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report
    NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe()
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return fts_.asInt;
    }

    bool IsManagedCodeAllowed() const
    {
        return isManagedCodeAllowed_;
    }

    void SetManagedCodeAllowed(bool allowed)
    {
        isManagedCodeAllowed_ = allowed;
    }

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *PopHandleScope();

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handleScope);

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *GetTopScope() const;

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline HandleStorage<T> *GetHandleStorage() const;

    // TaggedType has been specialized for js; other types have an empty implementation
    template <typename T>
    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const;

    PANDA_PUBLIC_API CustomTLSData *GetCustomTLSData(const char *key);
    PANDA_PUBLIC_API void SetCustomTLSData(const char *key, CustomTLSData *data);
    PANDA_PUBLIC_API bool EraseCustomTLSData(const char *key);
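    /**
     * Editor's note: per-thread key/value storage for runtime subsystems that cannot use a plain
     * thread_local. Sketch only; MyTlsData is a hypothetical CustomTLSData subclass, and the ownership
     * convention of SetCustomTLSData should be checked against the implementation:
     * @code
     *   struct MyTlsData : public CustomTLSData { int counter {0}; };
     *   thread->SetCustomTLSData("my-subsystem", new MyTlsData());
     *   auto *data = static_cast<MyTlsData *>(thread->GetCustomTLSData("my-subsystem"));
     * @endcode
     */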

#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
    uint32_t RecordMethodEnter()
    {
        return callDepth_++;
    }

    uint32_t RecordMethodExit()
    {
        return --callDepth_;
    }
#endif

    bool IsAttached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        return isAttached_.load(std::memory_order_relaxed);
    }

    void SetAttached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(true, std::memory_order_relaxed);
    }

    void SetDetached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(false, std::memory_order_relaxed);
    }

    bool IsVMThread()
    {
        return isVmThread_;
    }

    void SetVMThread()
    {
        isVmThread_ = true;
    }

    bool IsThrowingOOM()
    {
        return throwingOomCount_ > 0;
    }

    void SetThrowingOOM(bool isThrowingOom)
    {
        if (isThrowingOom) {
            throwingOomCount_++;
            return;
        }
        ASSERT(throwingOomCount_ > 0);
        throwingOomCount_--;
    }

    bool IsUsePreAllocObj()
    {
        return usePreallocObj_;
    }

    void SetUsePreAllocObj(bool usePreallocObj)
    {
        usePreallocObj_ = usePreallocObj;
    }

    PANDA_PUBLIC_API void PrintSuspensionStackIfNeeded();

    ThreadId GetId() const
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints imposed
        // on other reads or writes
        return id_.load(std::memory_order_relaxed);
    }

    void FreeInternalMemory() override;
    void DestroyInternalResources();

    /// Clears the pre/post barrier buffers (and other resources) without deallocation.
    void CleanupInternalResources();

    void InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize);
    virtual void DisableStackOverflowCheck();
    virtual void EnableStackOverflowCheck();
    /// Obtains current thread's native stack parameters and returns true on success
    virtual bool RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize);

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheck();

    static size_t GetStackOverflowCheckOffset()
    {
        return STACK_OVERFLOW_RESERVED_SIZE;
    }
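    /**
     * Editor's note: sketch of how the overflow check is intended to be used on a deep call path
     * (hypothetical call site; raising StackOverflowError is handled by runtime machinery outside
     * this header):
     * @code
     *   if (UNLIKELY(!thread->StackOverflowCheck())) {
     *       // both the native stack margin and the interpreter frame budget were checked;
     *       // abort the call and let the runtime report the stack overflow
     *       return;
     *   }
     * @endcode
     */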

    void *const *GetDebugDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDebugDispatchTable());
#else
        return debugDispatchTable_;
#endif
    }

    void SetDebugDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDebugDispatchTable(true, dispatchTable);
#else
        debugDispatchTable_ = const_cast<void *const *>(dispatchTable);
#endif
    }

    template <bool IS_DEBUG>
    void *const *GetCurrentDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDispatchTable<IS_DEBUG>());
#else
        if constexpr (IS_DEBUG) {
            return debugStubDispatchTable_;
        } else {
            return dispatchTable_;
        }
#endif
    }

    template <bool IS_DEBUG>
    void SetCurrentDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDispatchTable<IS_DEBUG>(true, dispatchTable);
#else
        if constexpr (IS_DEBUG) {
            debugStubDispatchTable_ = const_cast<void *const *>(dispatchTable);
        } else {
            dispatchTable_ = const_cast<void *const *>(dispatchTable);
        }
#endif
    }
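    /**
     * Editor's note: the thread keeps separate regular and debug-stub bytecode dispatch tables. Illustration
     * of the accessors only; the tables themselves and how the interpreter selects between them live outside
     * this header:
     * @code
     *   thread->SetCurrentDispatchTable<false>(regularTable);    // normal execution
     *   thread->SetCurrentDispatchTable<true>(debugStubTable);   // route bytecodes through debug stubs
     *   const void *const *active = thread->GetCurrentDispatchTable<true>();
     * @endcode
     */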

    PANDA_PUBLIC_API void SuspendImpl(bool internalSuspend = false);
    PANDA_PUBLIC_API void ResumeImpl(bool internalResume = false);

    virtual void Suspend()
    {
        SuspendImpl();
    }

    virtual void Resume()
    {
        ResumeImpl();
    }

    /// Transition to suspended and back to runnable, re-acquire share on mutator_lock_
    PANDA_PUBLIC_API void SuspendCheck();
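    /**
     * Editor's note: suspension is cooperative. A sketch of the intended interplay between a requesting
     * thread and the target thread (hypothetical call sites):
     * @code
     *   // Requesting thread (e.g. GC or debugger):
     *   target->Suspend();                 // raise the suspend request
     *   // ... wait for the target to reach a safepoint, then inspect its stack/roots ...
     *   target->Resume();
     *
     *   // Target thread: parks in WaitSuspension() when it polls the request at a safepoint.
     * @endcode
     */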

    bool IsUserSuspended()
    {
        return userCodeSuspendCount_ > 0;
    }

    /* @sync 1
     * @description This synchronization point can be used to insert a new attribute or method
     * into the ManagedThread class.
     */

    void WaitSuspension()
    {
        constexpr int TIMEOUT = 100;
        auto oldStatus = GetStatus();
        PrintSuspensionStackIfNeeded();
        UpdateStatus(ThreadStatus::IS_SUSPENDED);
        {
            /* @sync 1
             * @description Right after the thread updates its status to IS_SUSPENDED and right before beginning to wait
             * for actual suspension
             */
            os::memory::LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendVar_.TimedWait(&suspendLock_, TIMEOUT);
                // In case the runtime is being terminated, we should abort suspension and release monitors
                if (UNLIKELY(IsRuntimeTerminated())) {
                    suspendLock_.Unlock();
                    OnRuntimeTerminated();
                    UNREACHABLE();
                }
            }
            ASSERT(!IsSuspended());
        }
        UpdateStatus(oldStatus);
    }

    virtual void OnRuntimeTerminated() {}

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        // Do Unlock after StoreStatus, because the thread requesting a suspension should see an updated status
        StoreStatus(status);
        GetMutatorLock()->Unlock();
    }

    PANDA_PUBLIC_API void SafepointPoll();

    /**
     * From NativeCode you can call ManagedCodeBegin.
     * From ManagedCode you can call NativeCodeBegin.
     * Calling a Begin of the same kind you are already in is forbidden.
     */
    virtual void NativeCodeBegin();
    virtual void NativeCodeEnd();
    [[nodiscard]] virtual bool IsInNativeCode() const;

    virtual void ManagedCodeBegin();
    virtual void ManagedCodeEnd();
    [[nodiscard]] virtual bool IsManagedCode() const;
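    /**
     * Editor's note: sketch of the begin/end protocol described above, as seen from a native entry point
     * (hypothetical function; real code usually wraps the pair in an RAII scope object):
     * @code
     *   void NativeImplementationOfManagedApi(ManagedThread *thread)
     *   {
     *       ASSERT_NATIVE_CODE();
     *       thread->ManagedCodeBegin();   // NATIVE -> RUNNING, may wait on a safepoint
     *       // ... allocate objects, call managed methods, throw managed exceptions ...
     *       thread->ManagedCodeEnd();     // back to native code
     *   }
     * @endcode
     */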

    static bool IsManagedScope()
    {
        auto thread = GetCurrent();
        return thread != nullptr && thread->isManagedScope_;
    }

    [[nodiscard]] bool HasManagedCodeOnStack() const;
    [[nodiscard]] bool HasClearStack() const;

protected:
    void ProtectNativeStack();

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheckResult() const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_NATIVE_STACK) {
            if (UNLIKELY(__builtin_frame_address(0) < ToVoidPtr(nativeStackEnd_))) {
                return false;
            }
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_IFRAME_STACK) {
            if (UNLIKELY(GetStackFrameAllocator()->GetAllocatedSize() > iframeStackSize_)) {
                return false;
            }
        }
        return true;
    }

    static const int WAIT_INTERVAL = 10;

    template <typename T = void>
    T *GetAssociatedObject()
    {
        return reinterpret_cast<T *>(object_);
    }

    template <typename T>
    void SetAssociatedObject(T *object)
    {
        object_ = object;
    }

    virtual void InterruptPostImpl() {}

    void UpdateId(ThreadId id)
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints imposed
        // on other reads or writes
        id_.store(id, std::memory_order_relaxed);
    }

    /**
     * Prepares the ManagedThread instance for caching and further reuse by resetting its member variables to their
     * default values.
     */
    virtual void CleanUp();

private:
    enum SafepointFlag : bool { DONT_CHECK_SAFEPOINT = false, CHECK_SAFEPOINT = true };
    enum ReadlockFlag : bool { NO_READLOCK = false, READLOCK = true };

    PandaString LogThreadStack(ThreadState newState) const;

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    template <SafepointFlag SAFEPOINT = DONT_CHECK_SAFEPOINT, ReadlockFlag READLOCK_FLAG = NO_READLOCK>
    void StoreStatus(ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        while (true) {
            union FlagsAndThreadStatus oldFts {
            };
            union FlagsAndThreadStatus newFts {
            };
            oldFts.asInt = ReadFlagsAndThreadStatusUnsafe();  // NOLINT(cppcoreguidelines-pro-type-union-access)

            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (SAFEPOINT == CHECK_SAFEPOINT) {  // NOLINT(bugprone-suspicious-semicolon)
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                if (oldFts.asStruct.flags != initialThreadFlag_) {
                    // someone requires a safepoint
                    SafepointPoll();
                    continue;
                }
            }

            newFts.asStruct.flags = oldFts.asStruct.flags;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            newFts.asStruct.status = status;                // NOLINT(cppcoreguidelines-pro-type-union-access)

            // the mutator lock should be acquired before changing the status
            // to avoid blocking in the running state
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (READLOCK_FLAG == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                GetMutatorLock()->ReadLock();
            }

            // clang-format conflicts with CodeCheckAgent, so disable it here
            // clang-format off
            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
            if (fts_.asAtomic.compare_exchange_weak(
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                oldFts.asNonvolatileInt, newFts.asNonvolatileInt, std::memory_order_release)) {
                // If the CAS succeeded, we set the new status and no request occurred here; safe to proceed.
                break;
            }
            // Release the mutator lock to acquire it on the next loop iteration
            // clang-format on
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (READLOCK_FLAG == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                GetMutatorLock()->Unlock();
            }
        }
    }
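    /**
     * Editor's note: StoreStatus() works because the thread flags and the thread status are packed into one
     * 32-bit word (fts_): the flags occupy the low half and the status the high half (THREAD_STATUS_OFFSET
     * is 16, and GetStatus() shifts by it). The CAS therefore fails, and the loop retries, whenever another
     * thread raised a flag between the read and the store.
     */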

#ifdef PANDA_WITH_QUICKENER
    NO_OPTIMIZE const void *const *GetOrSetInnerDebugDispatchTable(bool set = false,
                                                                   const void *const *dispatch_table = nullptr) const
    {
        thread_local static const void *const *current_debug_dispatch_table = nullptr;
        if (set) {
            current_debug_dispatch_table = dispatch_table;
        }
        return current_debug_dispatch_table;
    }

    template <bool IS_DEBUG>
    NO_OPTIMIZE const void *const *GetOrSetInnerDispatchTable(bool set = false,
                                                              const void *const *dispatch_table = nullptr) const
    {
        thread_local static const void *const *current_dispatch_table = nullptr;
        if (set) {
            current_dispatch_table = dispatch_table;
        }
        return current_dispatch_table;
    }
#endif

    virtual bool TestLockState() const;

    static constexpr uint32_t THREAD_STATUS_OFFSET = 16;
    static_assert(sizeof(fts_) == sizeof(uint32_t), "Wrong fts_ size");

    // Can cause data races if a child thread's UpdateId is executed concurrently with GetNativeThreadId
    std::atomic<ThreadId> id_;

    static mem::TLAB *zeroTlab_;
    PandaVector<ObjectHeader **> localObjects_;
    WeightedAdaptiveTlabAverage *weightedAdaptiveTlabAverage_ {nullptr};

    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
    InterpreterCache interpreterCache_;

    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> customTlsCache_ GUARDED_BY(Locks::customTlsLock_);

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *g1PostBarrierRingBuffer_ {nullptr};
    // Keep these here to speed up the interpreter
    mem::BarrierType preBarrierType_ {mem::BarrierType::PRE_WRB_NONE};
    mem::BarrierType postBarrierType_ {mem::BarrierType::POST_WRB_NONE};
    // Thread local storages to avoid locks in the heap manager
    mem::StackFrameAllocator *stackFrameAllocator_;
    mem::InternalAllocator<>::LocalSmallObjectAllocator *internalLocalAllocator_;
    std::atomic_bool isAttached_ {false};  // Can be changed after the thread is registered and can cause a data race
    bool isVmThread_ = false;

    bool isManagedCodeAllowed_ {true};

    size_t throwingOomCount_ {0};
    bool usePreallocObj_ {false};

    ark::panda_file::SourceLang threadLang_ = ark::panda_file::SourceLang::PANDA_ASSEMBLY;

    PandaUniquePtr<tooling::PtThreadInfo> ptThreadInfo_;

    // for stack overflow check
    // |.....     Method 1    ....|
    // |.....     Method 2    ....|
    // |.....     Method 3    ....|_ _ _ native_stack_top
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|_ _ _ native_stack_end
    // |..... Reserved region ....|
    // |.... Protected region ....|_ _ _ native_stack_begin
    // |...... Guard region ......|
    uintptr_t nativeStackBegin_ {0};
    // end of the stack for the managed thread; an exception is thrown if the native stack grows over it
    uintptr_t nativeStackEnd_ {0};
    // os thread stack size
    size_t nativeStackSize_ {0};
    // guard region size of the stack
    size_t nativeStackGuardSize_ {0};
    // the reserved region is for handling the exception thrown when a stack overflow happens
    size_t nativeStackReservedSize_ {0};
    // the protected region is for compiled code to test a load from [sp - native_stack_reserved_size_] to trigger a segv
    size_t nativeStackProtectedSize_ {0};
    // max allowed size for an interpreter frame
    size_t iframeStackSize_ {std::numeric_limits<size_t>::max()};

    PandaVector<HandleScope<coretypes::TaggedType> *> taggedHandleScopes_ {};
    HandleStorage<coretypes::TaggedType> *taggedHandleStorage_ {nullptr};
    GlobalHandleStorage<coretypes::TaggedType> *taggedGlobalHandleStorage_ {nullptr};

    PandaVector<HandleScope<ObjectHeader *> *> objectHeaderHandleScopes_ {};
    HandleStorage<ObjectHeader *> *objectHeaderHandleStorage_ {nullptr};

    os::memory::ConditionVariable suspendVar_ GUARDED_BY(suspendLock_);
    os::memory::Mutex suspendLock_;
    uint32_t suspendCount_ GUARDED_BY(suspendLock_) = 0;
    std::atomic_uint32_t userCodeSuspendCount_ {0};

    PandaStack<ThreadState> threadFrameStates_;

    // Boolean which is safe to access after the runtime is destroyed
    bool isManagedScope_ {false};

    friend class ark::test::ThreadTest;
    friend class ark::MTThreadManager;

    // Used in method events
    uint32_t callDepth_ {0};
#ifndef PANDA_WITH_QUICKENER
    void *const *debugDispatchTable_ {nullptr};
    void *const *debugStubDispatchTable_ {nullptr};
    void *const *dispatchTable_ {nullptr};
#endif

    NO_COPY_SEMANTIC(ManagedThread);
    NO_MOVE_SEMANTIC(ManagedThread);
};
}  // namespace ark

#endif  // PANDA_RUNTIME_MANAGED_THREAD_H