/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/thread-inl.h"
#include "libpandabase/os/stacktrace.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/locks.h"
#include "runtime/include/object_header-inl.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/stack_walker.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/interpreter/runtime_interface.h"
#include "runtime/handle_scope-inl.h"
#include "runtime/mem/object_helpers.h"
#include "tooling/pt_thread_info.h"
#include "runtime/mem/runslots_allocator-inl.h"

namespace panda {
using TaggedValue = coretypes::TaggedValue;
using TaggedType = coretypes::TaggedType;

mem::TLAB *ManagedThread::zeroTlab_ = nullptr;
static const int MIN_PRIORITY = os::thread::LOWEST_PRIORITY;

static mem::InternalAllocatorPtr GetInternalAllocator(Thread *thread)
{
    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have HeapManager, so we get internal allocator from
    // runtime
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    if (heapManager != nullptr) {
        return heapManager->GetInternalAllocator();
    }
    return Runtime::GetCurrent()->GetInternalAllocator();
}

MTManagedThread::ThreadId MTManagedThread::GetInternalId()
{
    ASSERT(internalId_ != 0);
    return internalId_;
}

Thread::~Thread()
{
    FreeAllocatedMemory();
}

void Thread::FreeInternalMemory()
{
    FreeAllocatedMemory();
}

void Thread::FreeAllocatedMemory()
{
    auto allocator = Runtime::GetCurrent()->GetInternalAllocator();
    ASSERT(allocator != nullptr);
    allocator->Delete(preBuff_);
    preBuff_ = nullptr;

#ifdef PANDA_USE_CUSTOM_SIGNAL_STACK
    allocator->Free(signalStack_.ss_sp);
#endif
}

Thread::Thread(PandaVM *vm, ThreadType threadType)
    : vm_(vm), threadType_(threadType), mutatorLock_(vm->GetMutatorLock())
{
    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = vm->GetGC();
    if (gc != nullptr) {
        barrierSet_ = vm->GetGC()->GetBarrierSet();
        InitCardTableData(barrierSet_);
    }
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
    fts_.asInt = initialThreadFlag_;

#ifdef PANDA_USE_CUSTOM_SIGNAL_STACK
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    signalStack_.ss_sp = allocator->Alloc(SIGSTKSZ * 8U);
    signalStack_.ss_size = SIGSTKSZ * 8U;
    signalStack_.ss_flags = 0;
    sigaltstack(&signalStack_, nullptr);
#endif
}

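// Caches the card table addresses and the post write-barrier entry points in the thread so that
// fast paths can reach them without going through the virtual GCBarrierSet interface.
// Conceptually, a card-marking post barrier dirties the card that covers the stored-to address,
// roughly: cardTable[(addr - cardTableMinAddr_) >> LOG2_CARD_SIZE] = DIRTY
// (illustrative sketch only; the exact layout is defined by the CardTable implementation).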
void Thread::InitCardTableData(mem::GCBarrierSet *barrier)
{
    auto postBarrierType = barrier->GetPostType();
    switch (postBarrierType) {
        case panda::mem::BarrierType::POST_INTERGENERATIONAL_BARRIER:
            cardTableMinAddr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            cardTableAddr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            postWrbOneObject_ = reinterpret_cast<void *>(PostInterGenerationalBarrier1);
            postWrbTwoObjects_ = reinterpret_cast<void *>(PostInterGenerationalBarrier2);
            break;
        case panda::mem::BarrierType::POST_INTERREGION_BARRIER:
            cardTableAddr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            cardTableMinAddr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            postWrbOneObject_ = reinterpret_cast<void *>(PostInterRegionBarrierMarkSingleFast);
            postWrbTwoObjects_ = reinterpret_cast<void *>(PostInterRegionBarrierMarkPairFast);
            break;
        case panda::mem::BarrierType::POST_WRB_NONE:
            postWrbOneObject_ = reinterpret_cast<void *>(EmptyPostWriteBarrier);
            postWrbTwoObjects_ = reinterpret_cast<void *>(EmptyPostWriteBarrier);
            break;
        case mem::POST_RB_NONE:
            break;
        case mem::PRE_WRB_NONE:
        case mem::PRE_RB_NONE:
        case mem::PRE_SATB_BARRIER:
            LOG(FATAL, RUNTIME) << "Post barrier expected";
            break;
    }
}

void Thread::InitPreBuff()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
        preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
    }
}

CONSTEXPR_IN_RELEASE ThreadFlag GetInitialThreadFlag()
{
#ifndef NDEBUG
    ThreadFlag initialFlag = Runtime::GetOptions().IsRunGcEverySafepoint() ? SAFEPOINT_REQUEST : NO_FLAGS;
    return initialFlag;
#else
    return NO_FLAGS;
#endif
}

ThreadFlag Thread::initialThreadFlag_ = NO_FLAGS;

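// zeroTlab_ is a single shared zero-size TLAB used as a sentinel: a thread without a real TLAB
// points at it (see ClearTLAB), presumably so the bump-pointer allocation fast path can rely on a
// failing bounds check instead of testing the TLAB pointer for null first.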
/* static */
void ManagedThread::Initialize()
{
    ASSERT(!Thread::GetCurrent());
    ASSERT(!zeroTlab_);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    zeroTlab_ = allocator->New<mem::TLAB>(nullptr, 0U);
    initialThreadFlag_ = GetInitialThreadFlag();
}

/* static */
void ManagedThread::Shutdown()
{
    ASSERT(zeroTlab_);
    ManagedThread::SetCurrent(nullptr);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(zeroTlab_);
    zeroTlab_ = nullptr;
    /* @sync 1
     * @description: Runtime is terminated at this point and we cannot create new threads
     * */
}

/* static */
void MTManagedThread::Yield()
{
    LOG(DEBUG, RUNTIME) << "Reschedule the execution of a current thread";
    os::thread::Yield();
}

/* static - creation of the initial Managed thread */
ManagedThread *ManagedThread::Create(Runtime *runtime, PandaVM *vm, panda::panda_file::SourceLang threadLang)
{
    trace::ScopedTrace scopedTrace("ManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new; we rely on it remaining accessible in child threads
    // after the runtime is destroyed
    return new ManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, Thread::ThreadType::THREAD_TYPE_MANAGED,
                             threadLang);
}

/* static - creation of the initial MT Managed thread */
MTManagedThread *MTManagedThread::Create(Runtime *runtime, PandaVM *vm, panda::panda_file::SourceLang threadLang)
{
    trace::ScopedTrace scopedTrace("MTManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new; we rely on it remaining accessible in child threads
    // after the runtime is destroyed
    auto thread = new MTManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, threadLang);
    thread->ProcessCreatedThread();

    runtime->GetNotificationManager()->ThreadStartEvent(thread);

    return thread;
}

ManagedThread::ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *pandaVm,
                             Thread::ThreadType threadType, panda::panda_file::SourceLang threadLang)
    : Thread(pandaVm, threadType),
      id_(id),
      threadLang_(threadLang),
      ptThreadInfo_(allocator->New<tooling::PtThreadInfo>()),
      threadFrameStates_(allocator->Adapter())
{
    ASSERT(zeroTlab_ != nullptr);
    tlab_ = zeroTlab_;

    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = pandaVm->GetGC();
    if (gc != nullptr) {
        preBarrierType_ = gc->GetBarrierSet()->GetPreType();
        postBarrierType_ = gc->GetBarrierSet()->GetPostType();
        auto barrierSet = gc->GetBarrierSet();
        if (barrierSet->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
            preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
            // Need to initialize this in the constructor, because barriers can run between the
            // constructor and InitBuffers in InitializedClasses
            g1PostBarrierRingBuffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }

    stackFrameAllocator_ =
        allocator->New<mem::StackFrameAllocator>(Runtime::GetOptions().UseMallocForInternalAllocations());
    internalLocalAllocator_ =
        mem::InternalAllocator<>::SetUpLocalInternalAllocator(static_cast<mem::Allocator *>(allocator));
    taggedHandleStorage_ = allocator->New<HandleStorage<TaggedType>>(allocator);
    taggedGlobalHandleStorage_ = allocator->New<GlobalHandleStorage<TaggedType>>(allocator);
    objectHeaderHandleStorage_ = allocator->New<HandleStorage<ObjectHeader *>>(allocator);
}

ManagedThread::~ManagedThread()
{
    // ManagedThread::ShutDown() may not be called when exiting js_thread, so we need to set current_thread = nullptr
    // NB! ThreadManager is expected to store finished threads in a separate list that GC destroys;
    // current_thread should be nullified in Destroy()
    // (zeroTlab_ == nullptr means that we destroyed Runtime and do not need to register the TLAB)
    if (zeroTlab_ != nullptr) {
        // We should register the TLAB size for MemStats during thread destruction.
        GetVM()->GetHeapManager()->RegisterTLAB(GetTLAB());
    }

    mem::InternalAllocatorPtr allocator = GetInternalAllocator(this);
    allocator->Delete(objectHeaderHandleStorage_);
    allocator->Delete(taggedGlobalHandleStorage_);
    allocator->Delete(taggedHandleStorage_);
    mem::InternalAllocator<>::FinalizeLocalInternalAllocator(internalLocalAllocator_,
                                                             static_cast<mem::Allocator *>(allocator));
    internalLocalAllocator_ = nullptr;
    allocator->Delete(stackFrameAllocator_);
    allocator->Delete(ptThreadInfo_.release());

    ASSERT(threadFrameStates_.empty() && "stack should be empty");
}

void ManagedThread::InitBuffers()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
        // We need to recreate the buffers if the thread was detached (all structures were removed)
        // and is attached again; skip the initialization on the first attach after the constructor
        if (preBuff_ == nullptr) {
            preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
            ASSERT(g1PostBarrierRingBuffer_ == nullptr);
            g1PostBarrierRingBuffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }
}

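// NO_INLINE so that __builtin_frame_address(0) observes this helper's own, freshly created frame,
// which is a close approximation of the caller's current stack top.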
NO_INLINE static uintptr_t GetStackTop()
{
    return ToUintPtr(__builtin_frame_address(0));
}

NO_INLINE static void LoadStackPages(uintptr_t endAddr)
{
    // ISO C++ forbids variable length arrays and alloca is unsafe,
    // so we have to extend the stack step by step via recursive calls
    constexpr size_t MARGIN = 512;
    constexpr size_t STACK_PAGE_SIZE = 4_KB;
    // NOLINTNEXTLINE(modernize-avoid-c-arrays)
    volatile uint8_t stackBuffer[STACK_PAGE_SIZE - MARGIN];
    if (ToUintPtr(&(stackBuffer[0])) >= endAddr + STACK_PAGE_SIZE) {
        LoadStackPages(endAddr);
    }
    stackBuffer[0] = 0;
}

bool ManagedThread::RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize)
{
    int error = os::thread::ThreadGetStackInfo(os::thread::GetNativeHandle(), &stackAddr, &stackSize, &guardSize);
    if (error != 0) {
        LOG(ERROR, RUNTIME) << "RetrieveStackInfo: fail to get stack info, error = " << strerror(errno);
    }
    return error == 0;
}

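// Layout established below (for a stack growing downwards, addresses increasing upwards):
//   stackBase .. stackBase + guardSize           OS/pthread guard region
//   nativeStackBegin_ .. + protectedSize         mprotect'ed red zone used to detect overflow
//   .. nativeStackEnd_                           reserved headroom for handling the overflow
//   nativeStackEnd_ .. stackBase + stackSize     regular native stack available to the thread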
void ManagedThread::InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize)
{
    void *stackBase = nullptr;
    size_t guardSize;
    size_t stackSize;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t RESERVED_SIZE = 12_KB;
#endif
    static_assert(STACK_OVERFLOW_RESERVED_SIZE == RESERVED_SIZE);  // the compiler depends on this constant for its stack overflow test load
    if (!RetrieveStackInfo(stackBase, stackSize, guardSize)) {
        return;
    }
    if (guardSize < panda::os::mem::GetPageSize()) {
        guardSize = panda::os::mem::GetPageSize();
    }
    if (stackSize <= nativeStackReservedSize + nativeStackProtectedSize + guardSize) {
        LOG(ERROR, RUNTIME) << "InitForStackOverflowCheck: stack size not enough, stack_base = " << stackBase
                            << ", stack_size = " << stackSize << ", guard_size = " << guardSize;
        return;
    }
    LOG(DEBUG, RUNTIME) << "InitForStackOverflowCheck: stack_base = " << stackBase << ", stack_size = " << stackSize
                        << ", guard_size = " << guardSize;
    nativeStackBegin_ = ToUintPtr(stackBase) + guardSize;
    nativeStackEnd_ = nativeStackBegin_ + nativeStackProtectedSize + nativeStackReservedSize;
    nativeStackReservedSize_ = nativeStackReservedSize;
    nativeStackProtectedSize_ = nativeStackProtectedSize;
    nativeStackGuardSize_ = guardSize;
    nativeStackSize_ = stackSize;
    // Initialize the frame stack size to match the native stack size (*4 is just a heuristic to pass some tests),
    // but the frame stack size cannot be larger than the max memory size of the frame allocator
    auto iframeStackSize = stackSize * 4;
    auto allocatorMaxSize = stackFrameAllocator_->GetFullMemorySize();
    iframeStackSize_ = iframeStackSize <= allocatorMaxSize ? iframeStackSize : allocatorMaxSize;
    ProtectNativeStack();
    stackFrameAllocator_->SetReservedMemorySize(iframeStackSize_);
    stackFrameAllocator_->ReserveMemory();
}

void ManagedThread::ProtectNativeStack()
{
    if (nativeStackProtectedSize_ == 0) {
        return;
    }

    // Try to mprotect directly
    if (!panda::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_)) {
        return;
    }

    // If mprotect fails, try to load the stack pages and then retry mprotect
    uintptr_t nativeStackTop = AlignDown(GetStackTop(), panda::os::mem::GetPageSize());
    LOG(DEBUG, RUNTIME) << "ProtectNativeStack: try to load pages, mprotect error = " << strerror(errno)
                        << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                        << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
    if (nativeStackSize_ > STACK_MAX_SIZE_OVERFLOW_CHECK || nativeStackEnd_ >= nativeStackTop ||
        nativeStackTop > nativeStackEnd_ + STACK_MAX_SIZE_OVERFLOW_CHECK) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: too large stack, mprotect error = " << strerror(errno)
                            << ", max_stack_size = " << STACK_MAX_SIZE_OVERFLOW_CHECK
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
        return;
    }
    LoadStackPages(nativeStackBegin_);
    if (panda::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_)) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: fail to protect pages, error = " << strerror(errno)
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
    }
    size_t releaseSize = nativeStackTop - nativeStackBegin_ - panda::os::mem::GetPageSize();
    if (panda::os::mem::ReleasePages(nativeStackBegin_, nativeStackBegin_ + releaseSize) != 0) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: fail to release pages, error = " << strerror(errno)
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_
                            << ", release_size = " << releaseSize;
    }
}

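// DisableStackOverflowCheck/EnableStackOverflowCheck below unprotect and re-protect the red zone
// and lift/restore the interpreter frame limit; presumably this lets the runtime itself run on the
// reserved headroom, e.g. while a stack overflow error is being constructed and reported.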
void ManagedThread::DisableStackOverflowCheck()
{
    nativeStackEnd_ = nativeStackBegin_;
    iframeStackSize_ = std::numeric_limits<size_t>::max();
    if (nativeStackProtectedSize_ > 0) {
        panda::os::mem::MakeMemReadWrite(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_);
    }
}

void ManagedThread::EnableStackOverflowCheck()
{
    nativeStackEnd_ = nativeStackBegin_ + nativeStackProtectedSize_ + nativeStackReservedSize_;
    iframeStackSize_ = nativeStackSize_ * 4U;
    if (nativeStackProtectedSize_ > 0) {
        panda::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_);
    }
}

// NO_THREAD_SAFETY_ANALYSIS because TSAN cannot determine the lock status
void ManagedThread::SuspendCheck() NO_THREAD_SAFETY_ANALYSIS
{
    // We should use internal suspension to avoid missing a call of IncSuspend
    SuspendImpl(true);
    GetMutatorLock()->Unlock();
    GetMutatorLock()->ReadLock();
    ResumeImpl(true);
}

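// Suspension protocol: suspendCount_ counts nested suspend requests. The SUSPEND_REQUEST flag is
// set on the 0 -> 1 transition and cleared on the 1 -> 0 transition, so the safepoint machinery
// only blocks the thread while at least one request is outstanding; userCodeSuspendCount_ tracks
// the subset of requests that originate from user code.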
void ManagedThread::SuspendImpl(bool internalSuspend)
{
    os::memory::LockHolder lock(suspendLock_);
    LOG(DEBUG, RUNTIME) << "Suspending thread " << GetId();
    if (!internalSuspend) {
        if (IsUserSuspended()) {
            LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already suspended";
            return;
        }
        userCodeSuspendCount_++;
    }
    auto oldCount = suspendCount_++;
    if (oldCount == 0) {
        SetFlag(SUSPEND_REQUEST);
    }
}

void ManagedThread::ResumeImpl(bool internalResume)
{
    os::memory::LockHolder lock(suspendLock_);
    LOG(DEBUG, RUNTIME) << "Resuming thread " << GetId();
    if (!internalResume) {
        if (!IsUserSuspended()) {
            LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already resumed";
            return;
        }
        ASSERT(userCodeSuspendCount_ != 0);
        userCodeSuspendCount_--;
    }
    if (suspendCount_ > 0) {
        suspendCount_--;
        if (suspendCount_ == 0) {
            ClearFlag(SUSPEND_REQUEST);
        }
    }
    // Help for UnregisterExitedThread
    TSAN_ANNOTATE_HAPPENS_BEFORE(&fts_);
    suspendVar_.Signal();
}

void ManagedThread::SafepointPoll()
{
    if (this->TestAllFlags()) {
        trace::ScopedTrace scopedTrace("RunSafepoint");
        panda::interpreter::RuntimeInterface::Safepoint();
    }
}

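// threadFrameStates_ records the nesting of native and managed frames; NativeCodeBegin/End and
// ManagedCodeBegin/End must be strictly paired, and two consecutive NATIVE_CODE frames are not
// allowed. LogThreadStack dumps the recorded stack whenever a transition violates these rules.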
void ManagedThread::NativeCodeBegin()
{
    LOG_IF(!(threadFrameStates_.empty() || threadFrameStates_.top() != NATIVE_CODE), FATAL, RUNTIME)
        << LogThreadStack(NATIVE_CODE) << " or stack should be empty";
    threadFrameStates_.push(NATIVE_CODE);
    UpdateStatus(ThreadStatus::NATIVE);
    isManagedScope_ = false;
}

void ManagedThread::NativeCodeEnd()
{
    // threadFrameStates_ should not be accessed without the MutatorLock (as the runtime could have been destroyed).
    // If this was the last frame, it should have been called from Destroy(), which should UpdateStatus to FINISHED
    // after this method
    UpdateStatus(ThreadStatus::RUNNING);
    isManagedScope_ = true;
    LOG_IF(threadFrameStates_.empty(), FATAL, RUNTIME) << "stack should be not empty";
    LOG_IF(threadFrameStates_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(NATIVE_CODE);
    threadFrameStates_.pop();
}

bool ManagedThread::IsInNativeCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    return threadFrameStates_.top() == NATIVE_CODE;
}

void ManagedThread::ManagedCodeBegin()
{
    // threadFrameStates_ should not be accessed without the MutatorLock (as the runtime could have been destroyed)
    UpdateStatus(ThreadStatus::RUNNING);
    isManagedScope_ = true;
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    LOG_IF(threadFrameStates_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    threadFrameStates_.push(MANAGED_CODE);
}

void ManagedThread::ManagedCodeEnd()
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    LOG_IF(threadFrameStates_.top() != MANAGED_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    threadFrameStates_.pop();
    // Should be NATIVE_CODE
    UpdateStatus(ThreadStatus::NATIVE);
    isManagedScope_ = false;
}

bool ManagedThread::IsManagedCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    return threadFrameStates_.top() == MANAGED_CODE;
}

// Since we don't allow two consecutive NativeCode frames, there is no managed code on the stack if
// its size is 1 and the last frame is Native
bool ManagedThread::HasManagedCodeOnStack() const
{
    if (HasClearStack()) {
        return false;
    }
    if (threadFrameStates_.size() == 1 && IsInNativeCode()) {
        return false;
    }
    return true;
}

bool ManagedThread::HasClearStack() const
{
    return threadFrameStates_.empty();
}

PandaString ManagedThread::ThreadStatusAsString(enum ThreadStatus status)
{
    switch (status) {
        case ThreadStatus::CREATED:
            return "New";
        case ThreadStatus::RUNNING:
            return "Runnable";
        case ThreadStatus::IS_BLOCKED:
            return "Blocked";
        case ThreadStatus::IS_WAITING:
            return "Waiting";
        case ThreadStatus::IS_TIMED_WAITING:
            return "Timed_waiting";
        case ThreadStatus::IS_SUSPENDED:
            return "Suspended";
        case ThreadStatus::IS_COMPILER_WAITING:
            return "Compiler_waiting";
        case ThreadStatus::IS_WAITING_INFLATION:
            return "Waiting_inflation";
        case ThreadStatus::IS_SLEEPING:
            return "Sleeping";
        case ThreadStatus::IS_TERMINATED_LOOP:
            return "Terminated_loop";
        case ThreadStatus::TERMINATING:
            return "Terminating";
        case ThreadStatus::NATIVE:
            return "Native";
        case ThreadStatus::FINISHED:
            return "Terminated";
        default:
            return "unknown";
    }
}

PandaString ManagedThread::LogThreadStack(ThreadState newState) const
{
    PandaStringStream debugMessage;
    static std::unordered_map<ThreadState, std::string> threadStateToStringMap = {
        {ThreadState::NATIVE_CODE, "NATIVE_CODE"}, {ThreadState::MANAGED_CODE, "MANAGED_CODE"}};
    auto newStateIt = threadStateToStringMap.find(newState);
    auto topFrameIt = threadStateToStringMap.find(threadFrameStates_.top());
    ASSERT(newStateIt != threadStateToStringMap.end());
    ASSERT(topFrameIt != threadStateToStringMap.end());

    debugMessage << "threadId: " << GetId() << " "
                 << "tried go to " << newStateIt->second << " state, but last frame is: " << topFrameIt->second << ", "
                 << threadFrameStates_.size() << " frames in stack (from up to bottom): [";

    PandaStack<ThreadState> copyStack(threadFrameStates_);
    while (!copyStack.empty()) {
        auto it = threadStateToStringMap.find(copyStack.top());
        ASSERT(it != threadStateToStringMap.end());
        debugMessage << it->second;
        if (copyStack.size() > 1) {
            debugMessage << "|";
        }
        copyStack.pop();
    }
    debugMessage << "]";
    return debugMessage.str();
}

MTManagedThread::MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *pandaVm,
                                 panda::panda_file::SourceLang threadLang)
    : ManagedThread(id, allocator, pandaVm, Thread::ThreadType::THREAD_TYPE_MT_MANAGED, threadLang),
      enteringMonitor_(nullptr)
{
    ASSERT(pandaVm != nullptr);
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    internalId_ = threadManager->GetInternalThreadId();

    auto ext = Runtime::GetCurrent()->GetClassLinker()->GetExtension(GetThreadLang());
    if (ext != nullptr) {
        stringClassPtr_ = ext->GetClassRoot(ClassRoot::STRING);
    }

    auto *rs = allocator->New<mem::ReferenceStorage>(pandaVm->GetGlobalObjectStorage(), allocator, false);
    LOG_IF((rs == nullptr || !rs->Init()), FATAL, RUNTIME) << "Cannot create pt reference storage";
    ptReferenceStorage_ = PandaUniquePtr<mem::ReferenceStorage>(rs);
}

MTManagedThread::~MTManagedThread()
{
    ASSERT(internalId_ != 0);
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->RemoveInternalThreadId(internalId_);
}

void ManagedThread::PushLocalObject(ObjectHeader **objectHeader)
{
    ASSERT(TestLockState());
    localObjects_.push_back(objectHeader);
    LOG(DEBUG, GC) << "PushLocalObject for thread " << std::hex << this << ", obj = " << *objectHeader;
}

void ManagedThread::PopLocalObject()
{
    ASSERT(TestLockState());
    ASSERT(!localObjects_.empty());
    LOG(DEBUG, GC) << "PopLocalObject from thread " << std::hex << this << ", obj = " << *localObjects_.back();
    localObjects_.pop_back();
}

bool ManagedThread::TestLockState() const
{
#ifndef NDEBUG
    // Object handles can be created during class initialization, so check lock state only after GC is started.
    return !ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (GetMutatorLock()->GetState() != MutatorLock::MutatorLockState::UNLOCKED);
#else
    return true;
#endif
}

void MTManagedThread::PushLocalObjectLocked(ObjectHeader *obj)
{
    localObjectsLocked_.EmplaceBack(obj, GetFrame());
}

void MTManagedThread::PopLocalObjectLocked([[maybe_unused]] ObjectHeader *out)
{
    if (LIKELY(!localObjectsLocked_.Empty())) {
#ifndef NDEBUG
        ObjectHeader *obj = localObjectsLocked_.Back().GetObject();
        if (obj != out) {
            LOG(WARNING, RUNTIME) << "Locked object is not paired";
        }
#endif  // !NDEBUG
        localObjectsLocked_.PopBack();
    } else {
        LOG(WARNING, RUNTIME) << "PopLocalObjectLocked failed, current thread locked object is empty";
    }
}

Span<LockedObjectInfo> MTManagedThread::GetLockedObjectInfos()
{
    return localObjectsLocked_.Data();
}

void ManagedThread::UpdateTLAB(mem::TLAB *tlab)
{
    ASSERT(tlab_ != nullptr);
    ASSERT(tlab != nullptr);
    tlab_ = tlab;
}

void ManagedThread::ClearTLAB()
{
    ASSERT(zeroTlab_ != nullptr);
    tlab_ = zeroTlab_;
}

/* Common actions for creation of the thread. */
void MTManagedThread::ProcessCreatedThread()
{
    ManagedThread::SetCurrent(this);
    // Runtime takes ownership of the thread
    trace::ScopedTrace scopedTrace2("ThreadManager::RegisterThread");
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->RegisterThread(this);
    NativeCodeBegin();
}

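// During a moving GC, objects referenced from thread roots may be relocated; the UpdateGCRoots
// methods below rewrite every root that points at a forwarded object with its new address.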
void ManagedThread::UpdateGCRoots()
{
    if ((exception_ != nullptr) && (exception_->IsForwarded())) {
        exception_ = ::panda::mem::GetForwardAddress(exception_);
    }
    for (auto &&it : localObjects_) {
        if ((*it)->IsForwarded()) {
            (*it) = ::panda::mem::GetForwardAddress(*it);
        }
    }

    if (!taggedHandleScopes_.empty()) {
        taggedHandleStorage_->UpdateHeapObject();
        taggedGlobalHandleStorage_->UpdateHeapObject();
    }

    if (!objectHeaderHandleScopes_.empty()) {
        objectHeaderHandleStorage_->UpdateHeapObject();
    }
}

/* Returns true if the sleep was interrupted */
bool MTManagedThread::Sleep(uint64_t ms)
{
    auto thread = MTManagedThread::GetCurrent();
    bool isInterrupted = thread->IsInterrupted();
    if (!isInterrupted) {
        thread->TimedWait(ThreadStatus::IS_SLEEPING, ms, 0);
        isInterrupted = thread->IsInterrupted();
    }
    return isInterrupted;
}

void ManagedThread::SetThreadPriority(int32_t prio)
{
    ThreadId tid = GetId();
    int res = os::thread::SetPriority(tid, prio);
    if (!os::thread::IsSetPriorityError(res)) {
        LOG(DEBUG, RUNTIME) << "Successfully changed priority for thread " << tid << " to " << prio;
    } else {
        LOG(DEBUG, RUNTIME) << "Cannot change priority for thread " << tid << " to " << prio;
    }
}

uint32_t ManagedThread::GetThreadPriority()
{
    ThreadId tid = GetId();
    return os::thread::GetPriority(tid);
}

void MTManagedThread::UpdateGCRoots()
{
    ManagedThread::UpdateGCRoots();
    for (auto &it : localObjectsLocked_.Data()) {
        if (it.GetObject()->IsForwarded()) {
            it.SetObject(panda::mem::GetForwardAddress(it.GetObject()));
        }
    }

    // Update enterMonitorObject_
    if (enterMonitorObject_ != nullptr && enterMonitorObject_->IsForwarded()) {
        enterMonitorObject_ = panda::mem::GetForwardAddress(enterMonitorObject_);
    }

    ptReferenceStorage_->UpdateMovedRefs();
}

void MTManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    ManagedThread::VisitGCRoots(cb);

    // Visit enterMonitorObject_
    if (enterMonitorObject_ != nullptr) {
        cb(enterMonitorObject_);
    }

    ptReferenceStorage_->VisitObjects([&cb](const mem::GCRoot &gcRoot) { cb(gcRoot.GetObjectHeader()); },
                                      mem::RootType::ROOT_PT_LOCAL);
}

void MTManagedThread::SetDaemon()
{
    isDaemon_ = true;
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->AddDaemonThread();
    SetThreadPriority(MIN_PRIORITY);
}

void MTManagedThread::Interrupt(MTManagedThread *thread)
{
    os::memory::LockHolder lock(thread->condLock_);
    LOG(DEBUG, RUNTIME) << "Interrupt a thread " << thread->GetId();
    thread->SetInterruptedWithLockHeld(true);
    thread->SignalWithLockHeld();
    thread->InterruptPostImpl();
}

bool MTManagedThread::Interrupted()
{
    os::memory::LockHolder lock(condLock_);
    bool res = IsInterruptedWithLockHeld();
    SetInterruptedWithLockHeld(false);
    return res;
}

void MTManagedThread::StopDaemonThread()
{
    SetRuntimeTerminated();
    MTManagedThread::Interrupt(this);
}

void ManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    if (exception_ != nullptr) {
        cb(exception_);
    }
    for (auto it : localObjects_) {
        cb(*it);
    }

    if (!taggedHandleScopes_.empty()) {
        taggedHandleStorage_->VisitGCRoots(cb);
        taggedGlobalHandleStorage_->VisitGCRoots(cb);
    }
    if (!objectHeaderHandleScopes_.empty()) {
        objectHeaderHandleStorage_->VisitGCRoots(cb);
    }
}

void MTManagedThread::Destroy()
{
    ASSERT(this == ManagedThread::GetCurrent());
    ASSERT(GetStatus() != ThreadStatus::FINISHED);

    // Set this status to prevent the runtime from destroying itself while this NATIVE thread
    // is trying to acquire the runtime
    UpdateStatus(ThreadStatus::TERMINATING);
    ReleaseMonitors();
    if (!IsDaemon()) {
        Runtime *runtime = Runtime::GetCurrent();
        runtime->GetNotificationManager()->ThreadEndEvent(this);
    }

    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    if (threadManager->UnregisterExitedThread(this)) {
        // Clear current_thread only if unregistration was successful
        ManagedThread::SetCurrent(nullptr);
    }
}

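// The custom TLS cache maps string keys to owned CustomTLSData entries and is guarded by the global
// Locks::customTlsLock_. Hypothetical usage sketch (MyData and "my-key" are illustrative only):
//   thread->SetCustomTLSData("my-key", new MyData());  // the thread takes ownership of the data
//   auto *myData = static_cast<MyData *>(thread->GetCustomTLSData("my-key"));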
CustomTLSData *ManagedThread::GetCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    auto it = customTlsCache_.find(key);
    if (it == customTlsCache_.end()) {
        return nullptr;
    }
    return it->second.get();
}

void ManagedThread::SetCustomTLSData(const char *key, CustomTLSData *data)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    PandaUniquePtr<CustomTLSData> tlsData(data);
    auto it = customTlsCache_.find(key);
    if (it == customTlsCache_.end()) {
        customTlsCache_[key] = {PandaUniquePtr<CustomTLSData>()};
    }
    customTlsCache_[key].swap(tlsData);
}

bool ManagedThread::EraseCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    return customTlsCache_.erase(key) != 0;
}

LanguageContext ManagedThread::GetLanguageContext()
{
    return Runtime::GetCurrent()->GetLanguageContext(threadLang_);
}

void MTManagedThread::FreeInternalMemory()
{
    localObjectsLocked_.~LockedObjectList<>();
    ptReferenceStorage_.reset();

    ManagedThread::FreeInternalMemory();
}

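// DestroyInternalResources hands the barrier buffers over to the GC for deletion
// (BuffersKeepingFlag::DELETE), whereas CleanupInternalResources asks the GC to keep them
// (BuffersKeepingFlag::KEEP), presumably so the thread object can be reused later.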
void ManagedThread::DestroyInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this, mem::BuffersKeepingFlag::DELETE);
    ASSERT(preBuff_ == nullptr);
    ASSERT(g1PostBarrierRingBuffer_ == nullptr);
    ptThreadInfo_->Destroy();
}

void ManagedThread::CleanupInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this, mem::BuffersKeepingFlag::KEEP);
}

void ManagedThread::FreeInternalMemory()
{
    threadFrameStates_.~PandaStack<ThreadState>();
    DestroyInternalResources();

    localObjects_.~PandaVector<ObjectHeader **>();
    {
        os::memory::LockHolder lock(*Locks::customTlsLock_);
        customTlsCache_.~PandaMap<const char *, PandaUniquePtr<CustomTLSData>>();
    }

    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(stackFrameAllocator_);
    allocator->Delete(internalLocalAllocator_);

    allocator->Delete(ptThreadInfo_.release());

    taggedHandleScopes_.~PandaVector<HandleScope<coretypes::TaggedType> *>();
    allocator->Delete(taggedHandleStorage_);
    allocator->Delete(taggedGlobalHandleStorage_);

    allocator->Delete(objectHeaderHandleStorage_);
    objectHeaderHandleScopes_.~PandaVector<HandleScope<ObjectHeader *> *>();

    Thread::FreeInternalMemory();
}

void ManagedThread::PrintSuspensionStackIfNeeded()
{
    /* @sync 1
     * @description Before getting runtime options
     */
    if (!Runtime::GetOptions().IsSafepointBacktrace()) {
        /* @sync 2
         * @description After getting runtime options
         */
        return;
    }
    /* @sync 3
     * @description After getting runtime options
     */
    PandaStringStream out;
    out << "Thread " << GetId() << " is suspended at\n";
    PrintStack(out);
    LOG(INFO, RUNTIME) << out.str();
}

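// Resets the per-thread state (exception, TLAB, frame states, local objects, TLS cache, interpreter
// cache, handle storages, and flags); presumably used so a ManagedThread object can be reused for a
// fresh thread without being reallocated.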
void ManagedThread::CleanUp()
{
    // Clean up the exception, TLAB, interpreter cache, and HandleStorage
    ClearException();
    ClearTLAB();

    while (!threadFrameStates_.empty()) {
        threadFrameStates_.pop();
    }
    localObjects_.clear();
    {
        os::memory::LockHolder lock(*Locks::customTlsLock_);
        customTlsCache_.clear();
    }
    interpreterCache_.Clear();

    taggedHandleScopes_.clear();
    taggedHandleStorage_->FreeHandles(0);
    taggedGlobalHandleStorage_->FreeHandles();

    objectHeaderHandleStorage_->FreeHandles(0);
    objectHeaderHandleScopes_.clear();

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
    fts_.asInt = initialThreadFlag_;
    StoreStatus<DONT_CHECK_SAFEPOINT, NO_READLOCK>(ThreadStatus::CREATED);
    // NOTE(molotkovnikhail, 13159) Add cleanup of signal_stack for windows target
}

}  // namespace panda