/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_INCLUDE_MANAGED_THREAD_H_
#define PANDA_RUNTIME_INCLUDE_MANAGED_THREAD_H_

#include "thread.h"

namespace panda {
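// Request flags are power-of-two bit masks. They share one 32-bit word with the thread
// status (flags in the low half, status in the high half, see THREAD_STATUS_OFFSET), so
// they can be read, set and cleared atomically via ReadFlag/SetFlag/ClearFlag below.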
enum ThreadFlag {
    NO_FLAGS = 0,
    GC_SAFEPOINT_REQUEST = 1,
    SUSPEND_REQUEST = 2,
    RUNTIME_TERMINATION_REQUEST = 4,
};

/**
 * \brief Class represents a managed thread
 *
 * When the thread is created, it registers itself in the runtime, so
 * the runtime knows about all managed threads at any given time.
 *
 * This class should be used to store thread-specific information that
 * is necessary to execute managed code:
 *  - Frame
 *  - Exception
 *  - Interpreter cache
 *  - etc.
 *
 *  Currently it is used by the interpreter to store the current frame only.
 */
class ManagedThread : public Thread {
public:
    using ThreadId = uint32_t;
    using native_handle_type = os::thread::native_handle_type;
    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;

    void SetLanguageContext(LanguageContext ctx)
    {
        ctx_ = ctx;
    }

    LanguageContext GetLanguageContext() const
    {
        return ctx_;
    }

    void SetCurrentFrame(Frame *f)
    {
        stor_ptr_.frame_ = f;
    }

    tooling::PtThreadInfo *GetPtThreadInfo() const
    {
        return pt_thread_info_.get();
    }

    Frame *GetCurrentFrame() const
    {
        return stor_ptr_.frame_;
    }

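    // If the thread is currently executing compiled code and the stored frame is an
    // interpreter boundary frame, return the previous (compiler) frame instead; in all
    // other cases the stored frame is returned as-is.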
    void *GetFrame() const
    {
        void *fp = GetCurrentFrame();
        if (IsCurrentFrameCompiled()) {
            return StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp)
                       ? StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(fp)
                       : fp;
        }
        return fp;
    }

    bool IsCurrentFrameCompiled() const
    {
        return stor_32_.is_compiled_frame_;
    }

    void SetCurrentFrameIsCompiled(bool value)
    {
        stor_32_.is_compiled_frame_ = value;
    }

    void SetException(ObjectHeader *exception)
    {
        stor_ptr_.exception_ = exception;
    }

    ObjectHeader *GetException() const
    {
        return stor_ptr_.exception_;
    }

    bool HasPendingException() const
    {
        return stor_ptr_.exception_ != nullptr;
    }

    void ClearException()
    {
        stor_ptr_.exception_ = nullptr;
    }

    static bool ThreadIsManagedThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        Thread::ThreadType thread_type = thread->GetThreadType();
        return thread_type == Thread::ThreadType::THREAD_TYPE_MANAGED ||
               thread_type == Thread::ThreadType::THREAD_TYPE_MT_MANAGED;
    }

    static ManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsManagedThread(thread));
        return static_cast<ManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
     * It can be used in hot paths to get the best performance.
     * It must only be used in places where the ManagedThread is known to exist.
     * @return pointer to ManagedThread
     */
    static ManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current ManagedThread.
     * @return pointer to ManagedThread, or nullptr if the current thread is not a managed thread
     */
    static ManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsManagedThread(thread)) {
            return CastFromThread(thread);
        }
        return nullptr;
    }
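
    // Illustrative usage (not part of the API): prefer GetCurrent() unless the caller is
    // already known to run on a managed thread, e.g.
    //     ManagedThread *thread = ManagedThread::GetCurrent();
    //     if (thread != nullptr && thread->HasPendingException()) {
    //         thread->ClearException();
    //     }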

    static bool Initialize();

    static bool Shutdown();

    bool IsThreadAlive() const
    {
        return GetStatus() != FINISHED;
    }

    enum ThreadStatus GetStatus() const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        uint32_t res_int = stor_32_.fts_.as_atomic.load(std::memory_order_acquire);
        return static_cast<enum ThreadStatus>(res_int >> THREAD_STATUS_OFFSET);
    }

    panda::mem::StackFrameAllocator *GetStackFrameAllocator() const
    {
        return stack_frame_allocator_;
    }

    panda::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
    {
        return internal_local_allocator_;
    }

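    // TLAB accessors: the thread-local allocation buffer lets this thread allocate objects
    // without taking heap locks (see also the "Thread local storages to avoid locks in heap
    // manager" fields below).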
    mem::TLAB *GetTLAB() const
    {
        ASSERT(stor_ptr_.tlab_ != nullptr);
        return stor_ptr_.tlab_;
    }

    void UpdateTLAB(mem::TLAB *tlab);

    void ClearTLAB();

    void SetStringClassPtr(void *p)
    {
        stor_ptr_.string_class_ptr_ = p;
    }

    static ManagedThread *Create(Runtime *runtime, PandaVM *vm);
    ~ManagedThread() override;

    explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm,
                           Thread::ThreadType thread_type);

    // The following methods are just proxies or caches for the runtime interface
    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
    {
        return pre_barrier_type_;
    }

    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
    {
        return post_barrier_type_;
    }

    // Methods to access thread-local storage
    InterpreterCache *GetInterpreterCache()
    {
        return &interpreter_cache_;
    }

    uintptr_t GetNativePc() const
    {
        return stor_ptr_.native_pc_;
    }

    bool IsJavaThread() const
    {
        return is_java_thread_;
    }

    bool IsJSThread() const
    {
        return is_js_thread_;
    }

    LanguageContext GetLanguageContext();

    inline bool IsSuspended() const
    {
        return ReadFlag(SUSPEND_REQUEST);
    }

    inline bool IsRuntimeTerminated() const
    {
        return ReadFlag(RUNTIME_TERMINATION_REQUEST);
    }

    inline void SetRuntimeTerminated()
    {
        SetFlag(RUNTIME_TERMINATION_REQUEST);
    }

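    // The constexpr helpers below compute byte offsets of ManagedThread fields (adjusted for
    // the pointer size of the target architecture); this is presumably how generated/compiled
    // code addresses the thread structure directly instead of calling the accessors above.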
    static constexpr size_t GetPtrStorageOffset(Arch arch, size_t offset)
    {
        return MEMBER_OFFSET(ManagedThread, stor_ptr_) + StoragePackedPtr::ConvertOffset(PointerSize(arch), offset);
    }

    static constexpr uint32_t GetFlagOffset()
    {
        return MEMBER_OFFSET(ManagedThread, stor_32_) + MEMBER_OFFSET(StoragePacked32, fts_);
    }

    static constexpr uint32_t GetNativePcOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, native_pc_));
    }

    static constexpr uint32_t GetFrameKindOffset()
    {
        return MEMBER_OFFSET(ManagedThread, stor_32_) + MEMBER_OFFSET(StoragePacked32, is_compiled_frame_);
    }

    static constexpr uint32_t GetFrameOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, frame_));
    }

    static constexpr uint32_t GetExceptionOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, exception_));
    }

    static constexpr uint32_t GetTLABOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, tlab_));
    }

    static constexpr uint32_t GetObjectOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, object_));
    }

    static constexpr uint32_t GetTlsCardTableAddrOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, card_table_addr_));
    }

    static constexpr uint32_t GetTlsCardTableMinAddrOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, card_table_min_addr_));
    }

    static constexpr uint32_t GetTlsConcurrentMarkingAddrOffset(Arch arch)
    {
        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, concurrent_marking_addr_));
    }

    virtual void VisitGCRoots(const ObjectVisitor &cb);

    virtual void UpdateGCRoots();

    void PushLocalObject(ObjectHeader **object_header);

    void PopLocalObject();

    void SetThreadPriority(int32_t prio);

    uint32_t GetThreadPriority() const;

    inline bool IsGcRequired() const
    {
        return ReadFlag(GC_SAFEPOINT_REQUEST);
    }

    // NO_THREAD_SANITIZE for invalid TSAN data race report
    NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const
    {
        return (stor_32_.fts_.as_struct.flags & flag) != 0;  // NOLINT(cppcoreguidelines-pro-type-union-access)
    }

    NO_THREAD_SANITIZE bool TestAllFlags() const
    {
        return (stor_32_.fts_.as_struct.flags) != NO_FLAGS;  // NOLINT(cppcoreguidelines-pro-type-union-access)
    }

    void SetFlag(ThreadFlag flag)
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        stor_32_.fts_.as_atomic.fetch_or(flag, std::memory_order_seq_cst);
    }

    void ClearFlag(ThreadFlag flag)
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        stor_32_.fts_.as_atomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst);
    }

    // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report
    NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe() const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
        return stor_32_.fts_.as_int;
    }

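    // Atomically replaces the status bits while preserving the flag bits: the CAS loop
    // retries whenever another thread changed the flags between the read and the exchange.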
    void StoreStatus(ThreadStatus status)
    {
        while (true) {
            union FlagsAndThreadStatus old_fts {};
            union FlagsAndThreadStatus new_fts {};
            old_fts.as_int = ReadFlagsAndThreadStatusUnsafe();  // NOLINT(cppcoreguidelines-pro-type-union-access)
            new_fts.as_struct.flags = old_fts.as_struct.flags;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            new_fts.as_struct.status = status;                  // NOLINT(cppcoreguidelines-pro-type-union-access)
            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
            if (stor_32_.fts_.as_atomic.compare_exchange_weak(old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int,
                                                              std::memory_order_release)) {
                // If the CAS succeeded, the new status is set and no request occurred in between; safe to proceed.
                break;
            }
        }
    }

    bool IsManagedCodeAllowed() const
    {
        return is_managed_code_allowed_;
    }

    void SetManagedCodeAllowed(bool allowed)
    {
        is_managed_code_allowed_ = allowed;
    }

    // TaggedType has been specialized for JS; for other types this is an empty implementation
    template <typename T>
    inline HandleScope<T> *PopHandleScope()
    {
        return nullptr;
    }

    // TaggedType has been specialized for JS; for other types this is an empty implementation
    template <typename T>
    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handle_scope)
    {
    }

    // TaggedType has been specialized for JS; for other types this is an empty implementation
    template <typename T>
    inline HandleScope<T> *GetTopScope() const
    {
        return nullptr;
    }

    // TaggedType has been specialized for JS; for other types this is an empty implementation
    template <typename T>
    inline HandleStorage<T> *GetHandleStorage() const
    {
        return nullptr;
    }

    // TaggedType has been specialized for JS; for other types this is an empty implementation
    template <typename T>
    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const
    {
        return nullptr;
    }

    CustomTLSData *GetCustomTLSData(const char *key);
    void SetCustomTLSData(const char *key, CustomTLSData *data);

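    // Illustrative usage of the custom TLS accessors (MyTlsData is a hypothetical type derived
    // from CustomTLSData; ownership and lookup semantics are defined by the implementation):
    //     thread->SetCustomTLSData("my-key", new MyTlsData());
    //     auto *data = static_cast<MyTlsData *>(thread->GetCustomTLSData("my-key"));
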
#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
    uint32_t RecordMethodEnter()
    {
        return call_depth_++;
    }

    uint32_t RecordMethodExit()
    {
        return --call_depth_;
    }
#endif

    bool IsAttached() const
    {
        return is_attached_.load(std::memory_order_relaxed);
    }

    void SetAttached()
    {
        is_attached_.store(true, std::memory_order_relaxed);
    }

    void SetDetached()
    {
        is_attached_.store(false, std::memory_order_relaxed);
    }

    bool IsVMThread() const
    {
        return is_vm_thread_;
    }

    void SetVMThread()
    {
        is_vm_thread_ = true;
    }

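    // throwing_oom_count_ is a counter rather than a boolean so that nested
    // SetThrowingOOM(true)/SetThrowingOOM(false) pairs balance correctly.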
    bool IsThrowingOOM() const
    {
        return throwing_oom_count_ > 0;
    }

    void SetThrowingOOM(bool is_throwing_oom)
    {
        if (is_throwing_oom) {
            throwing_oom_count_++;
            return;
        }
        ASSERT(throwing_oom_count_ > 0);
        throwing_oom_count_--;
    }

    bool IsUsePreAllocObj() const
    {
        return use_prealloc_obj_;
    }

    void SetUsePreAllocObj(bool use_prealloc_obj)
    {
        use_prealloc_obj_ = use_prealloc_obj;
    }

    void PrintSuspensionStackIfNeeded();

    ThreadId GetId() const
    {
        return id_.load(std::memory_order_relaxed);
    }

    virtual void FreeInternalMemory();

protected:
    static const int WAIT_INTERVAL = 10;

    void SetJavaThread()
    {
        is_java_thread_ = true;
    }

    void SetJSThread()
    {
        is_js_thread_ = true;
    }

    template <typename T = void>
    T *GetAssociatedObject() const
    {
        return reinterpret_cast<T *>(stor_ptr_.object_);
    }

    template <typename T>
    void SetAssociatedObject(T *object)
    {
        stor_ptr_.object_ = object;
    }

    virtual void InterruptPostImpl() {}

    void UpdateId(ThreadId id)
    {
        id_.store(id, std::memory_order_relaxed);
    }

private:
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    static constexpr uint32_t THREAD_STATUS_OFFSET = 16;
    static_assert(sizeof(stor_32_.fts_) == sizeof(uint32_t), "Wrong fts_ size");

    // Can cause data races if child thread's UpdateId is executed concurrently with GetNativeThreadId
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    std::atomic<ThreadId> id_;

    static mem::TLAB *zero_tlab;
    static bool is_initialized;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    PandaVector<ObjectHeader **> local_objects_;

    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    InterpreterCache interpreter_cache_;

    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> custom_tls_cache_ GUARDED_BY(Locks::custom_tls_lock);

    // Keep these here to speed up interpreter
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    mem::BarrierType pre_barrier_type_ {mem::BarrierType::PRE_WRB_NONE};
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    mem::BarrierType post_barrier_type_ {mem::BarrierType::POST_WRB_NONE};
    // Thread local storages to avoid locks in heap manager
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    mem::StackFrameAllocator *stack_frame_allocator_;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    mem::InternalAllocator<>::LocalSmallObjectAllocator *internal_local_allocator_;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool is_java_thread_ = false;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    std::atomic_bool is_attached_ {false};  // Can be changed after thread is registered and can cause data race
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool is_vm_thread_ = false;

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool is_js_thread_ = false;

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool is_managed_code_allowed_ {true};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    size_t throwing_oom_count_ {0};
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool use_prealloc_obj_ {false};

    // remove ctx in thread later
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    LanguageContext ctx_;

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    PandaUniquePtr<tooling::PtThreadInfo> pt_thread_info_;

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    PandaVector<HandleScope<coretypes::TaggedType> *> tagged_handle_scopes_ {};
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    HandleStorage<coretypes::TaggedType> *tagged_handle_storage_ {nullptr};
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    GlobalHandleStorage<coretypes::TaggedType> *tagged_global_handle_storage_ {nullptr};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    PandaVector<HandleScope<ObjectHeader *> *> object_header_handle_scopes_ {};
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    HandleStorage<ObjectHeader *> *object_header_handle_storage_ {nullptr};

    friend class panda::test::ThreadTest;
    friend class openjdkjvmti::TiThread;
    friend class openjdkjvmti::ScopedNoUserCodeSuspension;
    friend class Offsets_Thread_Test;
    friend class panda::ThreadManager;

    // Used in method events
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    uint32_t call_depth_ {0};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    NO_COPY_SEMANTIC(ManagedThread);
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    NO_MOVE_SEMANTIC(ManagedThread);
};
}  // namespace panda

#endif  // PANDA_RUNTIME_INCLUDE_MANAGED_THREAD_H_