/**
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MANAGED_THREAD_H
#define PANDA_RUNTIME_MANAGED_THREAD_H

#include "thread.h"

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_MANAGED_CODE() ASSERT(::panda::ManagedThread::GetCurrent()->IsManagedCode())
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_NATIVE_CODE() ASSERT(::panda::ManagedThread::GetCurrent()->IsInNativeCode())

namespace panda {
class MTThreadManager;
/**
 * @brief Class representing a managed thread
 *
 * When a thread is created, it registers itself in the runtime, so the
 * runtime knows about all managed threads at any given time.
 *
 * This class should be used to store thread-specific information that
 * is necessary to execute managed code:
 *  - Frame
 *  - Exception
 *  - Interpreter cache
 *  - etc.
 *
 *  Currently it is used by the interpreter to store the current frame only.
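 *
 *  A minimal, illustrative usage sketch (not taken from the runtime itself; it only
 *  assumes the calling thread is already attached and registered as a managed thread):
 *  @code
 *  panda::ManagedThread *thread = panda::ManagedThread::GetCurrent();
 *  if (thread != nullptr && thread->HasPendingException()) {
 *      ObjectHeader *exc = thread->GetException();
 *      // ... report or wrap the exception ...
 *      thread->ClearException();
 *  }
 *  @endcode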
 */
class ManagedThread : public Thread {
public:
    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };

    using NativeHandleType = os::thread::NativeHandleType;
    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;
    static constexpr size_t STACK_MAX_SIZE_OVERFLOW_CHECK = 256_MB;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t STACK_OVERFLOW_RESERVED_SIZE = 12_KB;
#endif
    static constexpr size_t STACK_OVERFLOW_PROTECTED_SIZE = 4_KB;

    void SetLanguageContext([[maybe_unused]] const LanguageContext &ctx)
    {
        // Deprecated method, don't use it. Only for compatibility with js_runtime.
    }

    void SetCurrentFrame(Frame *f)
    {
        frame_ = f;
    }

    tooling::PtThreadInfo *GetPtThreadInfo() const
    {
        return ptThreadInfo_.get();
    }

    Frame *GetCurrentFrame() const
    {
        return frame_;
    }

    void *GetFrame() const
    {
        void *fp = GetCurrentFrame();
        if (IsCurrentFrameCompiled()) {
            return (StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp))
                       ? (StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(fp))
                       : fp;
        }
        return fp;
    }

    bool IsCurrentFrameCompiled() const
    {
        return isCompiledFrame_;
    }

    void SetCurrentFrameIsCompiled(bool value)
    {
        isCompiledFrame_ = value;
    }

    void SetException(ObjectHeader *exception)
    {
        exception_ = exception;
    }

    ObjectHeader *GetException() const
    {
        return exception_;
    }

    bool HasPendingException() const
    {
        return exception_ != nullptr;
    }

    void ClearException()
    {
        exception_ = nullptr;
    }

    size_t GetIFrameStackSize() const
    {
        return iframeStackSize_;
    }

    static bool ThreadIsManagedThread(const Thread *thread)
    {
        ASSERT(thread != nullptr);
        Thread::ThreadType threadType = thread->GetThreadType();
        return threadType == Thread::ThreadType::THREAD_TYPE_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_MT_MANAGED ||
               threadType == Thread::ThreadType::THREAD_TYPE_TASK;
    }

    static ManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsManagedThread(thread));
        return static_cast<ManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
     * It can be used in hot paths to get the best performance.
     * It must only be used in places where a ManagedThread is known to exist.
     * @return pointer to ManagedThread
     */
    static ManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current ManagedThread.
     * @return pointer to ManagedThread or nullptr (if the current thread is not a managed thread)
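     *
     * Illustrative sketch only (not a runtime snippet); it contrasts the safe accessor with
     * GetCurrentRaw(), which skips the nullptr check and must only be used on managed threads:
     * @code
     * if (ManagedThread *thread = ManagedThread::GetCurrent()) {
     *     Frame *frame = thread->GetCurrentFrame();  // safe: this is a managed thread
     * } else {
     *     // the current thread is a plain native thread; no ManagedThread data is available
     * }
     * @endcode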
     */
    PANDA_PUBLIC_API static ManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsManagedThread(thread)) {
            return CastFromThread(thread);
        }
        return nullptr;
    }

    static void Initialize();

    static void Shutdown();

    bool IsThreadAlive()
    {
        return GetStatus() != ThreadStatus::FINISHED;
    }

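    // Illustrative call pattern (an assumption about typical call sites, which live elsewhere in
    // the runtime): a thread leaves RUNNING before a potentially blocking native call and
    // re-enters RUNNING afterwards, which may trigger a safepoint check:
    //     thread->UpdateStatus(ThreadStatus::NATIVE);   // RUNNING -> NATIVE, releases the mutator lock
    //     /* ... blocking native call ... */
    //     thread->UpdateStatus(ThreadStatus::RUNNING);  // NATIVE -> RUNNING, may poll a safepoint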
    void UpdateStatus(enum ThreadStatus status)
    {
        ASSERT(ManagedThread::GetCurrent() == this);

        ThreadStatus oldStatus = GetStatus();
        if (oldStatus == ThreadStatus::RUNNING && status != ThreadStatus::RUNNING) {
            TransitionFromRunningToSuspended(status);
        } else if (oldStatus != ThreadStatus::RUNNING && status == ThreadStatus::RUNNING) {
            // NB! This thread is treated as suspended, so when we transition from the suspended state to
            // running we need to check the suspension flag and counter; therefore SafepointPoll has to be
            // done before acquiring the mutator_lock.
            // StoreStatus acquires the lock here.
            StoreStatus<CHECK_SAFEPOINT, READLOCK>(ThreadStatus::RUNNING);
        } else if (oldStatus == ThreadStatus::NATIVE && status != ThreadStatus::IS_TERMINATED_LOOP &&
                   IsRuntimeTerminated()) {
            // If a daemon thread with NATIVE status was deregistered, it should not access any managed object,
            // i.e. change its status from NATIVE, because such an object may already have been deleted by the
            // runtime. If its status is changed, we must call a safepoint to terminate this thread.
            // For example, if a daemon thread calls ManagedCodeBegin (which changes status from NATIVE to
            // RUNNING), it may be interrupted by a GC thread, which changes status to IS_SUSPENDED.
            StoreStatus<CHECK_SAFEPOINT>(status);
        } else {
            // NB! The status is not a simple bit; without atomics this can produce a faulty GetStatus.
            StoreStatus(status);
        }
    }

    enum ThreadStatus GetStatus()
    {
        // Atomic with acquire order reason: data race with flags with dependencies on reads after
        // the load which should become visible
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        uint32_t resInt = fts_.asAtomic.load(std::memory_order_acquire);
        return static_cast<enum ThreadStatus>(resInt >> THREAD_STATUS_OFFSET);
    }

    static PandaString ThreadStatusAsString(enum ThreadStatus status);

    panda::mem::StackFrameAllocator *GetStackFrameAllocator() const
    {
        return stackFrameAllocator_;
    }

    panda::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
    {
        return internalLocalAllocator_;
    }

    mem::TLAB *GetTLAB() const
    {
        ASSERT(tlab_ != nullptr);
        return tlab_;
    }

    void UpdateTLAB(mem::TLAB *tlab);

    void ClearTLAB();

    void SetStringClassPtr(void *p)
    {
        stringClassPtr_ = p;
    }

    void SetArrayU16ClassPtr(void *p)
    {
        arrayU16ClassPtr_ = p;
    }

#ifndef NDEBUG
    bool IsRuntimeCallEnabled() const
    {
        return runtimeCallEnabled_ != 0;
    }
#endif

    static ManagedThread *Create(
        Runtime *runtime, PandaVM *vm,
        panda::panda_file::SourceLang threadLang = panda::panda_file::SourceLang::PANDA_ASSEMBLY);
    ~ManagedThread() override;

    explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm, Thread::ThreadType threadType,
                           panda::panda_file::SourceLang threadLang = panda::panda_file::SourceLang::PANDA_ASSEMBLY);

    // The methods below are just proxies or caches for the runtime interface

    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
    {
        return preBarrierType_;
    }

    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
    {
        return postBarrierType_;
    }

    // Methods to access thread local storage
    InterpreterCache *GetInterpreterCache()
    {
        return &interpreterCache_;
    }

    uintptr_t GetNativePc() const
    {
        return nativePc_;
    }

    void SetNativePc(uintptr_t pc)
    {
        nativePc_ = pc;
    }

    // Buffers may be destroyed during Detach(), so they should be initialized once more
    void InitBuffers();

    PandaVector<ObjectHeader *> *GetPreBuff() const
    {
        return preBuff_;
    }

    PandaVector<ObjectHeader *> *MovePreBuff()
    {
        auto res = preBuff_;
        preBuff_ = nullptr;
        return res;
    }

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *GetG1PostBarrierBuffer()
    {
        return g1PostBarrierRingBuffer_;
    }

    void ResetG1PostBarrierBuffer()
    {
        g1PostBarrierRingBuffer_ = nullptr;
    }

    static constexpr uint32_t GetG1PostBarrierBufferOffset()
    {
        return MEMBER_OFFSET(ManagedThread, g1PostBarrierRingBuffer_);
    }

    panda::panda_file::SourceLang GetThreadLang() const
    {
        return threadLang_;
    }

    PANDA_PUBLIC_API LanguageContext GetLanguageContext();

    inline bool IsSuspended()
    {
        return ReadFlag(SUSPEND_REQUEST);
    }

    inline bool IsRuntimeTerminated()
    {
        return ReadFlag(RUNTIME_TERMINATION_REQUEST);
    }

    inline void SetRuntimeTerminated()
    {
        SetFlag(RUNTIME_TERMINATION_REQUEST);
    }

    static constexpr uint32_t GetFrameKindOffset()
    {
        return MEMBER_OFFSET(ManagedThread, isCompiledFrame_);
    }
    static constexpr uint32_t GetFlagOffset()
    {
        return MEMBER_OFFSET(ManagedThread, fts_);
    }

    static constexpr uint32_t GetEntrypointsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, entrypoints_);
    }
    static constexpr uint32_t GetObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, object_);
    }
    static constexpr uint32_t GetFrameOffset()
    {
        return MEMBER_OFFSET(ManagedThread, frame_);
    }
    static constexpr uint32_t GetExceptionOffset()
    {
        return MEMBER_OFFSET(ManagedThread, exception_);
    }
    static constexpr uint32_t GetNativePcOffset()
    {
        return MEMBER_OFFSET(ManagedThread, nativePc_);
    }
    static constexpr uint32_t GetTLABOffset()
    {
        return MEMBER_OFFSET(ManagedThread, tlab_);
    }
    static constexpr uint32_t GetTlsCardTableAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableAddr_);
    }
    static constexpr uint32_t GetTlsCardTableMinAddrOffset()
    {
        return MEMBER_OFFSET(ManagedThread, cardTableMinAddr_);
    }
    static constexpr uint32_t GetTlsPostWrbOneObjectOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbOneObject_);
    }
    static constexpr uint32_t GetTlsPostWrbTwoObjectsOffset()
    {
        return MEMBER_OFFSET(ManagedThread, postWrbTwoObjects_);
    }
    static constexpr uint32_t GetTlsPreWrbEntrypointOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preWrbEntrypoint_);
    }
    static constexpr uint32_t GetTlsStringClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, stringClassPtr_);
    }
    static constexpr uint32_t GetTlsArrayU16ClassPointerOffset()
    {
        return MEMBER_OFFSET(ManagedThread, arrayU16ClassPtr_);
    }
    static constexpr uint32_t GetPreBuffOffset()
    {
        return MEMBER_OFFSET(ManagedThread, preBuff_);
    }

    static constexpr uint32_t GetLanguageExtensionsDataOffset()
    {
        return MEMBER_OFFSET(ManagedThread, languageExtensionData_);
    }

    static constexpr uint32_t GetRuntimeCallEnabledOffset()
    {
#ifndef NDEBUG
        return MEMBER_OFFSET(ManagedThread, runtimeCallEnabled_);
#else
        // it should not be used
        return 0;
#endif
    }

    static constexpr uint32_t GetInterpreterCacheOffset()
    {
        return MEMBER_OFFSET(ManagedThread, interpreterCache_);
    }

    void *GetLanguageExtensionsData() const
    {
        return languageExtensionData_;
    }

    void SetLanguageExtensionsData(void *data)
    {
        languageExtensionData_ = data;
    }

    static constexpr uint32_t GetInternalIdOffset()
    {
        return MEMBER_OFFSET(ManagedThread, internalId_);
    }

    virtual void VisitGCRoots(const ObjectVisitor &cb);

    virtual void UpdateGCRoots();

    PANDA_PUBLIC_API void PushLocalObject(ObjectHeader **objectHeader);

    PANDA_PUBLIC_API void PopLocalObject();

    void SetThreadPriority(int32_t prio);

    uint32_t GetThreadPriority();

    // NO_THREAD_SANITIZE to suppress an invalid TSAN data race report
    NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return (fts_.asStruct.flags & static_cast<uint16_t>(flag)) != 0;
    }

    NO_THREAD_SANITIZE bool TestAllFlags() const
    {
        return (fts_.asStruct.flags) != initialThreadFlag_;  // NOLINT(cppcoreguidelines-pro-type-union-access)
    }

    void SetFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.asAtomic.fetch_or(flag, std::memory_order_seq_cst);
    }

    void ClearFlag(ThreadFlag flag)
    {
        // Atomic with seq_cst order reason: data race with flags with requirement for sequentially consistent order
        // where threads observe all modifications in the same order
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        fts_.asAtomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst);
    }

    // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report
    NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe()
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return fts_.asInt;
    }

    bool IsManagedCodeAllowed() const
    {
        return isManagedCodeAllowed_;
    }

    void SetManagedCodeAllowed(bool allowed)
    {
        isManagedCodeAllowed_ = allowed;
    }

    // TaggedType has been specialized for JS; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *PopHandleScope();

    // TaggedType has been specialized for JS; other types have an empty implementation
    template <typename T>
    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handleScope);

    // TaggedType has been specialized for JS; other types have an empty implementation
    template <typename T>
    inline HandleScope<T> *GetTopScope() const;

    // TaggedType has been specialized for JS; other types have an empty implementation
    template <typename T>
    inline HandleStorage<T> *GetHandleStorage() const;

    // TaggedType has been specialized for JS; other types have an empty implementation
    template <typename T>
    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const;

    PANDA_PUBLIC_API CustomTLSData *GetCustomTLSData(const char *key);
    PANDA_PUBLIC_API void SetCustomTLSData(const char *key, CustomTLSData *data);
    PANDA_PUBLIC_API bool EraseCustomTLSData(const char *key);

#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
    uint32_t RecordMethodEnter()
    {
        return callDepth_++;
    }

    uint32_t RecordMethodExit()
    {
        return --callDepth_;
    }
#endif

    bool IsAttached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        return isAttached_.load(std::memory_order_relaxed);
    }

    void SetAttached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(true, std::memory_order_relaxed);
    }

    void SetDetached()
    {
        // Atomic with relaxed order reason: data race with isAttached_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        isAttached_.store(false, std::memory_order_relaxed);
    }


    bool IsVMThread()
    {
        return isVmThread_;
    }

    void SetVMThread()
    {
        isVmThread_ = true;
    }

    bool IsThrowingOOM()
    {
        return throwingOomCount_ > 0;
    }

    void SetThrowingOOM(bool isThrowingOom)
    {
        if (isThrowingOom) {
            throwingOomCount_++;
            return;
        }
        ASSERT(throwingOomCount_ > 0);
        throwingOomCount_--;
    }

    bool IsUsePreAllocObj()
    {
        return usePreallocObj_;
    }

    void SetUsePreAllocObj(bool usePreallocObj)
    {
        usePreallocObj_ = usePreallocObj;
    }

    PANDA_PUBLIC_API void PrintSuspensionStackIfNeeded();

    ThreadId GetId() const
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints imposed
        // on other reads or writes
        return id_.load(std::memory_order_relaxed);
    }

    void FreeInternalMemory() override;
    void DestroyInternalResources();

    /// Clears the pre/post barrier buffers (and other resources) without deallocation.
    void CleanupInternalResources();

    void InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize);
    virtual void DisableStackOverflowCheck();
    virtual void EnableStackOverflowCheck();
    /// Obtains current thread's native stack parameters and returns true on success
    virtual bool RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize);

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheck();

    static size_t GetStackOverflowCheckOffset()
    {
        return STACK_OVERFLOW_RESERVED_SIZE;
    }

    void *const *GetDebugDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDebugDispatchTable());
#else
        return debugDispatchTable_;
#endif
    }

    void SetDebugDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDebugDispatchTable(true, dispatchTable);
#else
        debugDispatchTable_ = const_cast<void *const *>(dispatchTable);
#endif
    }

    template <bool IS_DEBUG>
    void *const *GetCurrentDispatchTable() const
    {
#ifdef PANDA_WITH_QUICKENER
        return const_cast<void *const *>(GetOrSetInnerDispatchTable<IS_DEBUG>());
#else
        if constexpr (IS_DEBUG) {
            return debugStubDispatchTable_;
        } else {
            return dispatchTable_;
        }
#endif
    }

    template <bool IS_DEBUG>
    void SetCurrentDispatchTable(const void *const *dispatchTable)
    {
#ifdef PANDA_WITH_QUICKENER
        GetOrSetInnerDispatchTable<IS_DEBUG>(true, dispatchTable);
#else
        if constexpr (IS_DEBUG) {
            debugStubDispatchTable_ = const_cast<void *const *>(dispatchTable);
        } else {
            dispatchTable_ = const_cast<void *const *>(dispatchTable);
        }
#endif
    }

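    // Illustrative pairing (an assumption about typical call sites, which live elsewhere in the
    // runtime): another thread requests suspension and later resumes this thread; the suspended
    // thread blocks in WaitSuspension() once it reaches a safepoint:
    //     target->SuspendImpl();  // request suspension of `target`
    //     /* ... target eventually observes the request and waits in WaitSuspension() ... */
    //     target->ResumeImpl();   // allow `target` to continue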
    PANDA_PUBLIC_API void SuspendImpl(bool internalSuspend = false);
    PANDA_PUBLIC_API void ResumeImpl(bool internalResume = false);

    virtual void Suspend()
    {
        SuspendImpl();
    }

    virtual void Resume()
    {
        ResumeImpl();
    }

    /// Transition to suspended and back to runnable, re-acquire share on mutator_lock_
    PANDA_PUBLIC_API void SuspendCheck();

    bool IsUserSuspended()
    {
        return userCodeSuspendCount_ > 0;
    }

    /* @sync 1
     * @description This synchronization point can be used to insert a new attribute or method
     * into ManagedThread class.
     */

    void WaitSuspension()
    {
        constexpr int TIMEOUT = 100;
        auto oldStatus = GetStatus();
        PrintSuspensionStackIfNeeded();
        UpdateStatus(ThreadStatus::IS_SUSPENDED);
        {
            /* @sync 1
             * @description Right after the thread updates its status to IS_SUSPENDED and right before beginning to wait
             * for actual suspension
             */
            os::memory::LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendVar_.TimedWait(&suspendLock_, TIMEOUT);
                // In case runtime is being terminated, we should abort suspension and release monitors
                if (UNLIKELY(IsRuntimeTerminated())) {
                    suspendLock_.Unlock();
                    OnRuntimeTerminated();
                    UNREACHABLE();
                }
            }
            ASSERT(!IsSuspended());
        }
        UpdateStatus(oldStatus);
    }

    virtual void OnRuntimeTerminated() {}

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        // Do Unlock after StoreStatus, because the thread requesting a suspension should see an updated status
        StoreStatus(status);
        GetMutatorLock()->Unlock();
    }

    PANDA_PUBLIC_API void SafepointPoll();

    /**
     * From NativeCode you can call ManagedCodeBegin.
     * From ManagedCode you can call NativeCodeBegin.
     * Calling the begin function for the kind of code you are already in is forbidden.
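     *
     * Illustrative sketch of the allowed nesting (an assumption about typical call sites, not a
     * verbatim runtime snippet); it assumes the thread currently executes native code:
     * @code
     * ManagedThread *thread = ManagedThread::GetCurrent();
     * ASSERT(thread->IsInNativeCode());
     * thread->ManagedCodeBegin();   // NATIVE_CODE -> MANAGED_CODE
     * // ... execute managed code ...
     * thread->ManagedCodeEnd();     // MANAGED_CODE -> NATIVE_CODE
     * @endcode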
     */
    virtual void NativeCodeBegin();
    virtual void NativeCodeEnd();
    [[nodiscard]] virtual bool IsInNativeCode() const;

    virtual void ManagedCodeBegin();
    virtual void ManagedCodeEnd();
    [[nodiscard]] virtual bool IsManagedCode() const;

    static bool IsManagedScope()
    {
        auto thread = GetCurrent();
        return thread != nullptr && thread->isManagedScope_;
    }

    [[nodiscard]] bool HasManagedCodeOnStack() const;
    [[nodiscard]] bool HasClearStack() const;

protected:
    void ProtectNativeStack();

    template <bool CHECK_NATIVE_STACK = true, bool CHECK_IFRAME_STACK = true>
    ALWAYS_INLINE inline bool StackOverflowCheckResult() const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_NATIVE_STACK) {
            if (UNLIKELY(__builtin_frame_address(0) < ToVoidPtr(nativeStackEnd_))) {
                return false;
            }
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CHECK_IFRAME_STACK) {
            if (UNLIKELY(GetStackFrameAllocator()->GetAllocatedSize() > iframeStackSize_)) {
                return false;
            }
        }
        return true;
    }

    static const int WAIT_INTERVAL = 10;

    template <typename T = void>
    T *GetAssociatedObject()
    {
        return reinterpret_cast<T *>(object_);
    }

    template <typename T>
    void SetAssociatedObject(T *object)
    {
        object_ = object;
    }

    virtual void InterruptPostImpl() {}

    void UpdateId(ThreadId id)
    {
        // Atomic with relaxed order reason: data race with id_ with no synchronization or ordering constraints imposed
        // on other reads or writes
        id_.store(id, std::memory_order_relaxed);
    }

    /**
     * Prepares the ManagedThread instance for caching and further reuse by resetting its member variables to their
     * default values.
     */
    virtual void CleanUp();

private:
    enum SafepointFlag : bool { DONT_CHECK_SAFEPOINT = false, CHECK_SAFEPOINT = true };
    enum ReadlockFlag : bool { NO_READLOCK = false, READLOCK = true };

    PandaString LogThreadStack(ThreadState newState) const;

    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
    template <SafepointFlag SAFEPOINT = DONT_CHECK_SAFEPOINT, ReadlockFlag READLOCK_FLAG = NO_READLOCK>
    void StoreStatus(ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        while (true) {
            union FlagsAndThreadStatus oldFts {
            };
            union FlagsAndThreadStatus newFts {
            };
            oldFts.asInt = ReadFlagsAndThreadStatusUnsafe();  // NOLINT(cppcoreguidelines-pro-type-union-access)

            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (SAFEPOINT == CHECK_SAFEPOINT) {  // NOLINT(bugprone-suspicious-semicolon)
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                if (oldFts.asStruct.flags != initialThreadFlag_) {
                    // someone requires a safepoint
                    SafepointPoll();
                    continue;
                }
            }

            newFts.asStruct.flags = oldFts.asStruct.flags;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            newFts.asStruct.status = status;                // NOLINT(cppcoreguidelines-pro-type-union-access)

            // The mutator lock should be acquired before changing the status
            // to avoid blocking in the running state
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (READLOCK_FLAG == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                GetMutatorLock()->ReadLock();
            }

            // clang-format conflicts with CodeCheckAgent, so disable it here
            // clang-format off
            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
            if (fts_.asAtomic.compare_exchange_weak(
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                oldFts.asNonvolatileInt, newFts.asNonvolatileInt, std::memory_order_release)) {
                // If CAS succeeded, we set new status and no request occurred here, safe to proceed.
                break;
            }
            // Release mutator lock to acquire it on the next loop iteration
            // clang-format on
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (READLOCK_FLAG == READLOCK) {  // NOLINT(bugprone-suspicious-semicolon)
                GetMutatorLock()->Unlock();
            }
        }
    }

#ifdef PANDA_WITH_QUICKENER
    NO_OPTIMIZE const void *const *GetOrSetInnerDebugDispatchTable(bool set = false,
                                                                   const void *const *dispatch_table = nullptr) const
    {
        thread_local static const void *const *current_debug_dispatch_table = nullptr;
        if (set) {
            current_debug_dispatch_table = dispatch_table;
        }
        return current_debug_dispatch_table;
    }

    template <bool IS_DEBUG>
    NO_OPTIMIZE const void *const *GetOrSetInnerDispatchTable(bool set = false,
                                                              const void *const *dispatch_table = nullptr) const
    {
        thread_local static const void *const *current_dispatch_table = nullptr;
        if (set) {
            current_dispatch_table = dispatch_table;
        }
        return current_dispatch_table;
    }
#endif

    virtual bool TestLockState() const;

    static constexpr uint32_t THREAD_STATUS_OFFSET = 16;
    static_assert(sizeof(fts_) == sizeof(uint32_t), "Wrong fts_ size");

    // Can cause data races if child thread's UpdateId is executed concurrently with GetNativeThreadId
    std::atomic<ThreadId> id_;

    static mem::TLAB *zeroTlab_;
    PandaVector<ObjectHeader **> localObjects_;

    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
    InterpreterCache interpreterCache_;

    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> customTlsCache_ GUARDED_BY(Locks::customTlsLock_);

    mem::GCG1BarrierSet::G1PostBarrierRingBufferType *g1PostBarrierRingBuffer_ {nullptr};
    // Keep these here to speed up interpreter
    mem::BarrierType preBarrierType_ {mem::BarrierType::PRE_WRB_NONE};
    mem::BarrierType postBarrierType_ {mem::BarrierType::POST_WRB_NONE};
    // Thread-local storage to avoid locks in the heap manager
    mem::StackFrameAllocator *stackFrameAllocator_;
    mem::InternalAllocator<>::LocalSmallObjectAllocator *internalLocalAllocator_;
    std::atomic_bool isAttached_ {false};  // Can be changed after thread is registered and can cause data race
    bool isVmThread_ = false;

    bool isManagedCodeAllowed_ {true};

    size_t throwingOomCount_ {0};
    bool usePreallocObj_ {false};

    panda::panda_file::SourceLang threadLang_ = panda::panda_file::SourceLang::PANDA_ASSEMBLY;

    PandaUniquePtr<tooling::PtThreadInfo> ptThreadInfo_;

    // for stack overflow check
    // |.....     Method 1    ....|
    // |.....     Method 2    ....|
    // |.....     Method 3    ....|_ _ _ native_stack_top
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|
    // |..........................|_ _ _ native_stack_end
    // |..... Reserved region ....|
    // |.... Protected region ....|_ _ _ native_stack_begin
    // |...... Guard region ......|
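    // Illustrative reading of the diagram above (a sketch; the exact arithmetic lives in
    // InitForStackOverflowCheck): the usable range for managed code is roughly
    // [nativeStackEnd_, native_stack_top). Interpreted code compares the current frame address
    // against nativeStackEnd_ (see StackOverflowCheckResult()), while compiled code test-loads
    // [sp - nativeStackReservedSize_] so that touching the protected region raises SIGSEGV.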
    uintptr_t nativeStackBegin_ {0};
    // End of the stack for the managed thread; an exception is thrown if the native stack grows beyond it
    uintptr_t nativeStackEnd_ {0};
    // OS thread stack size
    size_t nativeStackSize_ {0};
    // Guard region size of the stack
    size_t nativeStackGuardSize_ {0};
    // The reserved region is for handling exception throwing if a stack overflow happens
    size_t nativeStackReservedSize_ {0};
    // The protected region is for compiled code to test-load [sp - nativeStackReservedSize_] to trigger SIGSEGV
    size_t nativeStackProtectedSize_ {0};
    // Max allowed size for interpreter frames
    size_t iframeStackSize_ {std::numeric_limits<size_t>::max()};

    PandaVector<HandleScope<coretypes::TaggedType> *> taggedHandleScopes_ {};
    HandleStorage<coretypes::TaggedType> *taggedHandleStorage_ {nullptr};
    GlobalHandleStorage<coretypes::TaggedType> *taggedGlobalHandleStorage_ {nullptr};

    PandaVector<HandleScope<ObjectHeader *> *> objectHeaderHandleScopes_ {};
    HandleStorage<ObjectHeader *> *objectHeaderHandleStorage_ {nullptr};

    os::memory::ConditionVariable suspendVar_ GUARDED_BY(suspendLock_);
    os::memory::Mutex suspendLock_;
    uint32_t suspendCount_ GUARDED_BY(suspendLock_) = 0;
    std::atomic_uint32_t userCodeSuspendCount_ {0};

    PandaStack<ThreadState> threadFrameStates_;

    // Boolean which is safe to access after runtime is destroyed
    bool isManagedScope_ {false};

    friend class panda::test::ThreadTest;
    friend class panda::MTThreadManager;

    // Used in method events
    uint32_t callDepth_ {0};
#ifndef PANDA_WITH_QUICKENER
    void *const *debugDispatchTable_ {nullptr};
    void *const *debugStubDispatchTable_ {nullptr};
    void *const *dispatchTable_ {nullptr};
#endif

    NO_COPY_SEMANTIC(ManagedThread);
    NO_MOVE_SEMANTIC(ManagedThread);
};
}  // namespace panda

#endif  // PANDA_RUNTIME_MANAGED_THREAD_H