• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "runtime/include/thread.h"
17 
18 #include "libpandabase/os/stacktrace.h"
19 #include "runtime/handle_base-inl.h"
20 #include "runtime/include/locks.h"
21 #include "runtime/include/object_header-inl.h"
22 #include "runtime/include/runtime.h"
23 #include "runtime/include/runtime_notification.h"
24 #include "runtime/include/stack_walker.h"
25 #include "runtime/include/thread_scopes.h"
26 #include "runtime/interpreter/runtime_interface.h"
27 #include "runtime/handle_scope-inl.h"
28 #include "runtime/mem/object_helpers.h"
29 #include "tooling/pt_thread_info.h"
30 #include "runtime/include/panda_vm.h"
31 #include "runtime/mem/runslots_allocator-inl.h"
32 
33 namespace panda {
34 using TaggedValue = coretypes::TaggedValue;
35 using TaggedType = coretypes::TaggedType;
36 
37 bool ManagedThread::is_initialized = false;
38 mem::TLAB *ManagedThread::zero_tlab = nullptr;
39 static const int MIN_PRIORITY = 19;
40 
GetInternalId()41 MTManagedThread::ThreadId MTManagedThread::GetInternalId()
42 {
43     if (internal_id_ == 0) {
44         internal_id_ = GetVM()->GetThreadManager()->GetInternalThreadId();
45     }
46     return internal_id_;
47 }
48 
// Per-native-thread pointer to the Thread object attached to the current OS thread (may be null).
static thread_local Thread *s_current_thread = nullptr;
50 
/* static */
// Attaches (or detaches, when thread == nullptr) a Thread object to the calling OS thread.
void Thread::SetCurrent(Thread *thread)
{
    s_current_thread = thread;
}
56 
/* static */
// Returns the Thread attached to the calling OS thread, or nullptr if none is attached.
Thread *Thread::GetCurrent()
{
    return s_current_thread;
}
62 
/* static */
// One-time process-wide setup: allocates the shared zero-size sentinel TLAB.
// Must run before any ManagedThread is constructed (the ctor asserts zero_tlab != nullptr)
// and before any thread is attached (asserts current thread is null).
bool ManagedThread::Initialize()
{
    ASSERT(!is_initialized);
    ASSERT(!Thread::GetCurrent());
    ASSERT(!zero_tlab);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    // Zero-capacity TLAB: every allocation attempt through it falls back to the slow path.
    zero_tlab = allocator->New<mem::TLAB>(nullptr, 0U);
    is_initialized = true;
    return true;
}
74 
/* static */
// Process-wide teardown: detaches the current thread and frees the sentinel TLAB.
// zero_tlab is nulled so ~ManagedThread can detect "runtime already destroyed".
bool ManagedThread::Shutdown()
{
    ASSERT(is_initialized);
    ASSERT(zero_tlab);
    is_initialized = false;
    // Detach before freeing: remaining native threads must not observe a dangling TLAB.
    ManagedThread::SetCurrent(nullptr);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(zero_tlab);
    zero_tlab = nullptr;
    return true;
}
87 
/* static */
// Hints the OS scheduler to reschedule the calling thread.
void MTManagedThread::Yield()
{
    LOG(DEBUG, RUNTIME) << "Reschedule the execution of a current thread";
    os::thread::Yield();
}
94 
/* static - creation of the initial Managed thread */
// Factory for the initial single-threaded-VM ManagedThread, bound to the calling OS thread.
// Ownership: caller (runtime) takes ownership of the returned raw pointer.
ManagedThread *ManagedThread::Create(Runtime *runtime, PandaVM *vm)
{
    trace::ScopedTrace scoped_trace("ManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create thread structure using new, we rely on this structure to be accessible in child threads after
    // runtime is destroyed
    // CODECHECK-NOLINTNEXTLINE(CPP_RULE_ID_SMARTPOINTER_INSTEADOF_ORIGINPOINTER)
    return new ManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, Thread::ThreadType::THREAD_TYPE_MANAGED);
}
105 
/* static - creation of the initial MT Managed thread */
// Factory for the initial multi-threaded-VM thread. Unlike ManagedThread::Create, it also
// registers the thread with the ThreadManager and pushes the first NATIVE_CODE frame
// (via ProcessCreatedThread). Caller takes ownership of the returned raw pointer.
MTManagedThread *MTManagedThread::Create(Runtime *runtime, PandaVM *vm)
{
    trace::ScopedTrace scoped_trace("MTManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create thread structure using new, we rely on this structure to be accessible in child threads after
    // runtime is destroyed
    // CODECHECK-NOLINTNEXTLINE(CPP_RULE_ID_SMARTPOINTER_INSTEADOF_ORIGINPOINTER)
    auto thread = new MTManagedThread(os::thread::GetCurrentThreadId(), allocator, vm);
    thread->ProcessCreatedThread();
    return thread;
}
118 
GetInternalAllocator(ManagedThread * thread)119 static mem::InternalAllocatorPtr GetInternalAllocator(ManagedThread *thread)
120 {
121     // WORKAROUND(v.cherkashin): EcmaScript doesn't have HeapManager, so we get internal allocator from runtime
122     mem::HeapManager *heap_manager = thread->GetVM()->GetHeapManager();
123     if (heap_manager != nullptr) {
124         return heap_manager->GetInternalAllocator();
125     }
126     return Runtime::GetCurrent()->GetInternalAllocator();
127 }
128 
// Constructs a managed thread: installs the sentinel TLAB, caches GC barrier types,
// and allocates the per-thread frame allocator and handle storages.
// Requires ManagedThread::Initialize() to have run (zero_tlab must exist).
ManagedThread::ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *panda_vm,
                             Thread::ThreadType thread_type)
    : Thread(panda_vm, thread_type), id_(id), ctx_(nullptr), pt_thread_info_(allocator->New<tooling::PtThreadInfo>())
{
    ASSERT(zero_tlab != nullptr);
    stor_ptr_.tlab_ = zero_tlab;

    // WORKAROUND(v.cherkashin): EcmaScript doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = panda_vm->GetGC();
    if (gc != nullptr) {
        pre_barrier_type_ = gc->GetBarrierSet()->GetPreType();
        post_barrier_type_ = gc->GetBarrierSet()->GetPostType();
    }

    // Per-thread allocators and handle storages; released in ~ManagedThread / FreeInternalMemory.
    stack_frame_allocator_ = allocator->New<mem::FrameAllocator<>>();
    internal_local_allocator_ =
        mem::InternalAllocator<>::SetUpLocalInternalAllocator(static_cast<mem::Allocator *>(allocator));
    tagged_handle_storage_ = allocator->New<HandleStorage<TaggedType>>(allocator);
    tagged_global_handle_storage_ = allocator->New<GlobalHandleStorage<TaggedType>>(allocator);
    object_header_handle_storage_ = allocator->New<HandleStorage<ObjectHeader *>>(allocator);
}
150 
// Destroys per-thread resources in reverse order of construction.
// Note: must be safe to run after the runtime itself is gone (zero_tlab == nullptr then).
ManagedThread::~ManagedThread()
{
    // ManagedThread::ShutDown() may not be called when exiting js_thread, so need set current_thread = nullptr
    // NB! ThreadManager is expected to store finished threads in separate list and GC destroys them,
    // current_thread should be nullified in Destroy()
    // (zero_tlab == nullptr means that we destroyed Runtime and do not need to register TLAB)
    if (zero_tlab != nullptr) {
        // We should register TLAB size for MemStats during thread destroy.
        GetVM()->GetHeapManager()->RegisterTLAB(GetTLAB());
    }

    mem::InternalAllocatorPtr allocator = GetInternalAllocator(this);
    allocator->Delete(object_header_handle_storage_);
    allocator->Delete(tagged_global_handle_storage_);
    allocator->Delete(tagged_handle_storage_);
    mem::InternalAllocator<>::FinalizeLocalInternalAllocator(internal_local_allocator_,
                                                             static_cast<mem::Allocator *>(allocator));
    internal_local_allocator_ = nullptr;
    allocator->Delete(stack_frame_allocator_);
    // pt_thread_info_ is a unique_ptr-like holder; release() hands ownership to the allocator's Delete.
    allocator->Delete(pt_thread_info_.release());
}
172 
MTManagedThread(ThreadId id,mem::InternalAllocatorPtr allocator,PandaVM * panda_vm)173 MTManagedThread::MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *panda_vm)
174     : ManagedThread(id, allocator, panda_vm, Thread::ThreadType::THREAD_TYPE_MT_MANAGED),
175       thread_frame_states_(allocator->Adapter()),
176       waiting_monitor_(nullptr)
177 {
178     internal_id_ = GetVM()->GetThreadManager()->GetInternalThreadId();
179 
180     mem::GC *gc = panda_vm->GetGC();
181     auto barrier = gc->GetBarrierSet();
182     if (barrier->GetPostType() != panda::mem::BarrierType::POST_WRB_NONE) {
183         auto func1 = barrier->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_POST, "MIN_ADDR");
184         stor_ptr_.card_table_min_addr_ = std::get<void *>(func1.GetValue());
185         auto func2 = barrier->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_POST, "CARD_TABLE_ADDR");
186         stor_ptr_.card_table_addr_ = std::get<uint8_t *>(func2.GetValue());
187     }
188     if (barrier->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
189         auto addr =
190             barrier->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_PRE, "CONCURRENT_MARKING_ADDR");
191         stor_ptr_.concurrent_marking_addr_ = std::get<bool *>(addr.GetValue());
192         auto func =
193             barrier->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_PRE, "STORE_IN_BUFF_TO_MARK_FUNC");
194     }
195 
196     auto ext = Runtime::GetCurrent()->GetClassLinker()->GetExtension(GetLanguageContext());
197     if (ext != nullptr) {
198         stor_ptr_.string_class_ptr_ = ext->GetClassRoot(ClassRoot::STRING);
199     }
200 
201     auto *rs = allocator->New<mem::ReferenceStorage>(panda_vm->GetGlobalObjectStorage(), allocator, false);
202     LOG_IF((rs == nullptr || !rs->Init()), FATAL, RUNTIME) << "Cannot create pt reference storage";
203     pt_reference_storage_ = PandaUniquePtr<mem::ReferenceStorage>(rs);
204 }
205 
// Returns the internal id to the ThreadManager; the frame-state stack must already be empty
// (i.e. NativeCodeEnd/Destroy were balanced with the corresponding Begin calls).
MTManagedThread::~MTManagedThread()
{
    ASSERT(internal_id_ != 0);
    GetVM()->GetThreadManager()->RemoveInternalThreadId(internal_id_);

    ASSERT(thread_frame_states_.empty() && "stack should be empty");
}
213 
// Checks the thread's flag word and, if any flag (suspend request, etc.) is set,
// enters the runtime safepoint.
void MTManagedThread::SafepointPoll()
{
    if (this->TestAllFlags()) {
        trace::ScopedTrace scoped_trace("RunSafepoint");
        panda::interpreter::RuntimeInterface::Safepoint();
    }
}
221 
// Enters native code: pushes a NATIVE_CODE frame and switches status to NATIVE.
// Two consecutive NATIVE_CODE frames are forbidden (fatal), so the stack must be
// empty or have a MANAGED_CODE frame on top.
void MTManagedThread::NativeCodeBegin()
{
    LOG_IF(!(thread_frame_states_.empty() || thread_frame_states_.top() != NATIVE_CODE), FATAL, RUNTIME)
        << LogThreadStack(NATIVE_CODE) << " or stack should be empty";
    thread_frame_states_.push(NATIVE_CODE);
    // Push before the status change: once NATIVE, the GC may run concurrently with us.
    UpdateStatus(NATIVE);
    is_managed_scope_ = false;
}
230 
// Leaves native code: switches status back to RUNNING first (acquires mutator access),
// then pops the NATIVE_CODE frame. The top frame must be NATIVE_CODE (fatal otherwise).
void MTManagedThread::NativeCodeEnd()
{
    // thread_frame_states_ should not be accessed without MutatorLock (as runtime could have been destroyed)
    // If this was last frame, it should have been called from Destroy() and it should UpdateStatus to FINISHED
    // after this method
    UpdateStatus(RUNNING);
    is_managed_scope_ = true;
    LOG_IF(thread_frame_states_.empty(), FATAL, RUNTIME) << "stack should be not empty";
    LOG_IF(thread_frame_states_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(NATIVE_CODE);
    thread_frame_states_.pop();
}
242 
// True when the top frame is NATIVE_CODE. Fatal if the frame stack is empty.
bool MTManagedThread::IsInNativeCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    return thread_frame_states_.top() == NATIVE_CODE;
}
248 
// Enters managed code: acquires RUNNING status first, then pushes a MANAGED_CODE frame.
// The previous top frame must be NATIVE_CODE (fatal otherwise).
void MTManagedThread::ManagedCodeBegin()
{
    // thread_frame_states_ should not be accessed without MutatorLock (as runtime could have been destroyed)
    UpdateStatus(RUNNING);
    is_managed_scope_ = true;
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    LOG_IF(thread_frame_states_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    thread_frame_states_.push(MANAGED_CODE);
}
258 
// Leaves managed code: pops the MANAGED_CODE frame (fatal if the top is not MANAGED_CODE),
// then switches status to NATIVE so the GC may proceed without us.
void MTManagedThread::ManagedCodeEnd()
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    LOG_IF(thread_frame_states_.top() != MANAGED_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    thread_frame_states_.pop();
    // Should be NATIVE_CODE
    UpdateStatus(NATIVE);
    is_managed_scope_ = false;
}
268 
// True when the top frame is MANAGED_CODE. Fatal if the frame stack is empty.
bool MTManagedThread::IsManagedCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should be not empty";
    return thread_frame_states_.top() == MANAGED_CODE;
}
274 
275 // Since we don't allow two consecutive NativeCode frames, there is no managed code on stack if
276 // its size is 1 and last frame is Native
HasManagedCodeOnStack() const277 bool MTManagedThread::HasManagedCodeOnStack() const
278 {
279     if (HasClearStack()) {
280         return false;
281     }
282     if (thread_frame_states_.size() == 1 && IsInNativeCode()) {
283         return false;
284     }
285     return true;
286 }
287 
// True when no managed/native frame-state markers are recorded for this thread.
bool MTManagedThread::HasClearStack() const
{
    return thread_frame_states_.empty();
}
292 
LogThreadStack(ThreadState new_state) const293 PandaString MTManagedThread::LogThreadStack(ThreadState new_state) const
294 {
295     PandaStringStream debug_message;
296     static std::unordered_map<ThreadState, std::string> thread_state_to_string_map = {
297         {ThreadState::NATIVE_CODE, "NATIVE_CODE"}, {ThreadState::MANAGED_CODE, "MANAGED_CODE"}};
298     auto new_state_it = thread_state_to_string_map.find(new_state);
299     auto top_frame_it = thread_state_to_string_map.find(thread_frame_states_.top());
300     ASSERT(new_state_it != thread_state_to_string_map.end());
301     ASSERT(top_frame_it != thread_state_to_string_map.end());
302 
303     debug_message << "threadId: " << GetId() << " "
304                   << "tried go to " << new_state_it->second << " state, but last frame is: " << top_frame_it->second
305                   << ", " << thread_frame_states_.size() << " frames in stack (from up to bottom): [";
306 
307     PandaStack<ThreadState> copy_stack(thread_frame_states_);
308     while (!copy_stack.empty()) {
309         auto it = thread_state_to_string_map.find(copy_stack.top());
310         ASSERT(it != thread_state_to_string_map.end());
311         debug_message << it->second;
312         if (copy_stack.size() > 1) {
313             debug_message << "|";
314         }
315         copy_stack.pop();
316     }
317     debug_message << "]";
318     return debug_message.str();
319 }
320 
// Registers a slot holding an object reference as a GC root for this thread.
// The slot is visited by VisitGCRoots and fixed up by UpdateGCRoots.
void ManagedThread::PushLocalObject(ObjectHeader **object_header)
{
    // Object handles can be created during class initialization, so check lock state only after GC is started.
    ASSERT(!ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (Locks::mutator_lock->GetState() != MutatorLock::MutatorLockState::UNLOCKED) || this->IsJSThread());
    local_objects_.push_back(object_header);
    LOG(DEBUG, GC) << "PushLocalObject for thread " << std::hex << this << ", obj = " << *object_header;
}
329 
// Unregisters the most recently pushed local-object root (LIFO pairing with PushLocalObject).
void ManagedThread::PopLocalObject()
{
    // Object handles can be created during class initialization, so check lock state only after GC is started.
    ASSERT(!ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (Locks::mutator_lock->GetState() != MutatorLock::MutatorLockState::UNLOCKED) || this->IsJSThread());
    ASSERT(!local_objects_.empty());
    LOG(DEBUG, GC) << "PopLocalObject from thread " << std::hex << this << ", obj = " << *local_objects_.back();
    local_objects_.pop_back();
}
339 
// Returns the set of monitors this thread has entered.
// NOTE(review): returns a mutable reference without taking monitor_lock_ — callers appear
// to be responsible for synchronization; confirm against call sites.
std::unordered_set<Monitor *> &MTManagedThread::GetMonitors()
{
    return entered_monitors_;
}
344 
AddMonitor(Monitor * monitor)345 void MTManagedThread::AddMonitor(Monitor *monitor)
346 {
347     os::memory::LockHolder lock(monitor_lock_);
348     entered_monitors_.insert(monitor);
349     LOG(DEBUG, RUNTIME) << "Adding monitor " << monitor->GetId() << " to thread " << GetId();
350 }
351 
RemoveMonitor(Monitor * monitor)352 void MTManagedThread::RemoveMonitor(Monitor *monitor)
353 {
354     os::memory::LockHolder lock(monitor_lock_);
355     entered_monitors_.erase(monitor);
356     LOG(DEBUG, RUNTIME) << "Removing monitor " << monitor->GetId();
357 }
358 
// Releases every monitor this thread still holds (used on thread teardown).
void MTManagedThread::ReleaseMonitors()
{
    os::memory::LockHolder lock(monitor_lock_);
    while (!entered_monitors_.empty()) {
        // Iterate over a copy: Monitor::Release() calls back into RemoveMonitor, which
        // mutates entered_monitors_ and would invalidate a live iterator.
        // NOTE(review): the outer loop terminates only if Release() removes the monitor
        // from the set — presumably guaranteed; verify against Monitor::Release.
        auto monitors = entered_monitors_;
        for (auto monitor : monitors) {
            LOG(DEBUG, RUNTIME) << "Releasing monitor " << monitor->GetId();
            monitor->Release(this);
        }
    }
}
370 
PushLocalObjectLocked(ObjectHeader * obj)371 void MTManagedThread::PushLocalObjectLocked(ObjectHeader *obj)
372 {
373     LockedObjectInfo new_locked_obj = {obj, GetFrame()};
374     local_objects_locked_.emplace_back(new_locked_obj);
375 }
376 
PopLocalObjectLocked(ObjectHeader * out)377 void MTManagedThread::PopLocalObjectLocked([[maybe_unused]] ObjectHeader *out)
378 {
379     if (LIKELY(!local_objects_locked_.empty())) {
380 #ifndef NDEBUG
381         ObjectHeader *obj = local_objects_locked_.back().GetObject();
382         if (obj != out) {
383             LOG(WARNING, RUNTIME) << "Locked object is not paired";
384         }
385 #endif  // !NDEBUG
386         local_objects_locked_.pop_back();
387     } else {
388         LOG(WARNING, RUNTIME) << "PopLocalObjectLocked failed, current thread locked object is empty";
389     }
390 }
391 
// Read-only view of the objects this thread currently holds locked (with their frames).
const PandaVector<LockedObjectInfo> &MTManagedThread::GetLockedObjectInfos()
{
    return local_objects_locked_;
}
396 
// Installs a new thread-local allocation buffer. The previous TLAB pointer must be
// valid (at minimum the zero_tlab sentinel) and the new one non-null.
void ManagedThread::UpdateTLAB(mem::TLAB *tlab)
{
    ASSERT(stor_ptr_.tlab_ != nullptr);
    ASSERT(tlab != nullptr);
    stor_ptr_.tlab_ = tlab;
}
403 
// Resets this thread's TLAB to the shared zero-capacity sentinel (no live buffer).
void ManagedThread::ClearTLAB()
{
    ASSERT(zero_tlab != nullptr);
    stor_ptr_.tlab_ = zero_tlab;
}
409 
/* Common actions for creation of the thread. */
// Binds this object to the calling OS thread, registers it with the ThreadManager,
// and pushes the initial NATIVE_CODE frame.
void MTManagedThread::ProcessCreatedThread()
{
    ManagedThread::SetCurrent(this);
    // Runtime takes ownership of the thread
    trace::ScopedTrace scoped_trace2("ThreadManager::RegisterThread");
    GetVM()->GetThreadManager()->RegisterThread(this);
    NativeCodeBegin();
}
419 
// Fixes up this thread's GC roots after objects were moved: rewrites forwarded
// pointers in the pending exception, the registered local-object slots, and the
// tagged/object-header handle storages.
void ManagedThread::UpdateGCRoots()
{
    if ((stor_ptr_.exception_ != nullptr) && (stor_ptr_.exception_->IsForwarded())) {
        stor_ptr_.exception_ = ::panda::mem::GetForwardAddress(stor_ptr_.exception_);
    }
    for (auto &&it : local_objects_) {
        if ((*it)->IsForwarded()) {
            (*it) = ::panda::mem::GetForwardAddress(*it);
        }
    }

    // Handle storages only need fixing when at least one scope is active.
    if (!tagged_handle_scopes_.empty()) {
        tagged_handle_storage_->UpdateHeapObject();
        tagged_global_handle_storage_->UpdateHeapObject();
    }

    if (!object_header_handle_scopes_.empty()) {
        object_header_handle_storage_->UpdateHeapObject();
    }
}
440 
441 /* return true if sleep is interrupted */
Sleep(uint64_t ms)442 bool MTManagedThread::Sleep(uint64_t ms)
443 {
444     auto thread = MTManagedThread::GetCurrent();
445     bool is_interrupted = thread->IsInterrupted();
446     if (!is_interrupted) {
447         thread->TimedWait(IS_SLEEPING, ms, 0);
448         is_interrupted = thread->IsInterrupted();
449     }
450     return is_interrupted;
451 }
452 
SetThreadPriority(int32_t prio)453 void ManagedThread::SetThreadPriority(int32_t prio)
454 {
455     ThreadId tid = GetId();
456     int res = os::thread::SetPriority(tid, prio);
457     if (res == 0) {
458         LOG(DEBUG, RUNTIME) << "Successfully changed priority for thread " << tid << " to " << prio;
459     } else {
460         LOG(DEBUG, RUNTIME) << "Cannot change priority for thread " << tid << " to " << prio;
461     }
462 }
463 
// Queries the OS scheduling priority of this thread.
uint32_t ManagedThread::GetThreadPriority() const
{
    ThreadId tid = GetId();
    return os::thread::GetPriority(tid);
}
469 
// MT extension of root fix-up: also rewrites forwarded pointers in the locked-object
// list and the pt (debugger) reference storage.
void MTManagedThread::UpdateGCRoots()
{
    ManagedThread::UpdateGCRoots();
    for (auto &it : local_objects_locked_) {
        if (it.GetObject()->IsForwarded()) {
            it.SetObject(panda::mem::GetForwardAddress(it.GetObject()));
        }
    }

    pt_reference_storage_->UpdateMovedRefs();
}
481 
// Marks this thread as a daemon: registers it with the ThreadManager's daemon count
// and drops it to the lowest scheduling priority.
void MTManagedThread::SetDaemon()
{
    is_daemon_ = true;
    GetVM()->GetThreadManager()->AddDaemonThread();
    SetThreadPriority(MIN_PRIORITY);
}
488 
// Interrupts the target thread: sets its interrupted flag and signals its condition
// variable (waking a Sleep/TimedWait), all under the target's cond_lock_.
void MTManagedThread::Interrupt(MTManagedThread *thread)
{
    os::memory::LockHolder lock(thread->cond_lock_);
    LOG(DEBUG, RUNTIME) << "Interrupt a thread " << thread->GetId();
    thread->SetInterruptedWithLockHeld(true);
    thread->SignalWithLockHeld();
    thread->InterruptPostImpl();
}
497 
Interrupted()498 bool MTManagedThread::Interrupted()
499 {
500     os::memory::LockHolder lock(cond_lock_);
501     bool res = IsInterruptedWithLockHeld();
502     SetInterruptedWithLockHeld(false);
503     return res;
504 }
505 
// Marks the runtime as terminated for this daemon thread (first half of StopDaemonThread).
void MTManagedThread::StopDaemon0()
{
    SetRuntimeTerminated();
}
510 
// Stops a daemon thread: flags runtime termination, then interrupts it so any
// wait it is blocked in wakes up and observes the flag.
void MTManagedThread::StopDaemonThread()
{
    StopDaemon0();
    MTManagedThread::Interrupt(this);
}
516 
// NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
// Honors a pending suspension request: suspends internally, then releases and
// re-acquires the mutator lock so a stop-the-world GC can proceed, then resumes.
void MTManagedThread::SuspendCheck() NO_THREAD_SAFETY_ANALYSIS
{
    // We should use internal suspension to avoid missing call of IncSuspend
    SuspendImpl(true);
    Locks::mutator_lock->Unlock();
    Locks::mutator_lock->ReadLock();
    ResumeImpl(true);
}
526 
SuspendImpl(bool internal_suspend)527 void MTManagedThread::SuspendImpl(bool internal_suspend)
528 {
529     os::memory::LockHolder lock(suspend_lock_);
530     LOG(DEBUG, RUNTIME) << "Suspending thread " << GetId();
531     if (!internal_suspend && IsUserSuspended()) {
532         LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already suspended";
533         return;
534     }
535     IncSuspended(internal_suspend);
536 }
537 
// Decrements this thread's suspension count under suspend_lock_ and wakes it up.
// A user-initiated resume of a thread that is not user-suspended is a no-op.
void MTManagedThread::ResumeImpl(bool internal_resume)
{
    os::memory::LockHolder lock(suspend_lock_);
    LOG(DEBUG, RUNTIME) << "Resuming thread " << GetId();
    if (!internal_resume && !IsUserSuspended()) {
        LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already resumed";
        return;
    }
    DecSuspended(internal_resume);
    // Help for UnregisterExitedThread
    TSAN_ANNOTATE_HAPPENS_BEFORE(&stor_32_.fts_);
    StopSuspension();
}
551 
// Reports all of this thread's GC roots to the visitor: the pending exception,
// registered local-object slots, and active handle storages.
void ManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    if (stor_ptr_.exception_ != nullptr) {
        cb(stor_ptr_.exception_);
    }
    for (auto it : local_objects_) {
        cb(*it);
    }

    // Handle storages are visited only while at least one scope is active.
    if (!tagged_handle_scopes_.empty()) {
        tagged_handle_storage_->VisitGCRoots(cb);
        tagged_global_handle_storage_->VisitGCRoots(cb);
    }
    if (!object_header_handle_scopes_.empty()) {
        object_header_handle_storage_->VisitGCRoots(cb);
    }
}
569 
// MT extension of root visiting: also reports the pt (debugger) local references.
void MTManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    ManagedThread::VisitGCRoots(cb);

    pt_reference_storage_->VisitObjects([&cb](const mem::GCRoot &gc_root) { cb(gc_root.GetObjectHeader()); },
                                        mem::RootType::ROOT_PT_LOCAL);
}
577 
// Tears down the calling thread: releases monitors, fires the thread-end event for
// non-daemons, destroys debugger-side state, pops the last NATIVE_CODE frame, and
// unregisters from the ThreadManager. Idempotent for already-FINISHED threads.
void MTManagedThread::Destroy()
{
    ASSERT(this == ManagedThread::GetCurrent());
    if (GetStatus() == FINISHED) {
        return;
    }

    UpdateStatus(TERMINATING);  // Set this status to prevent runtime for destroying itself while this NATIVE thread
                                // is trying to acquire runtime.
    ReleaseMonitors();
    Runtime *runtime = Runtime::GetCurrent();
    if (!IsDaemon()) {
        runtime->GetNotificationManager()->ThreadEndEvent(GetId());
    }

    {
        // PtThreadInfo teardown must run in managed scope.
        ScopedManagedCodeThread s(this);
        GetPtThreadInfo()->Destroy();
    }

    NativeCodeEnd();

    if (GetVM()->GetThreadManager()->UnregisterExitedThread(this)) {
        // Clear current_thread only if unregistration was successful
        ManagedThread::SetCurrent(nullptr);
    }
}
605 
GetCustomTLSData(const char * key)606 CustomTLSData *ManagedThread::GetCustomTLSData(const char *key)
607 {
608     os::memory::LockHolder lock(*Locks::custom_tls_lock);
609     auto it = custom_tls_cache_.find(key);
610     if (it == custom_tls_cache_.end()) {
611         return nullptr;
612     }
613     return it->second.get();
614 }
615 
SetCustomTLSData(const char * key,CustomTLSData * data)616 void ManagedThread::SetCustomTLSData(const char *key, CustomTLSData *data)
617 {
618     os::memory::LockHolder lock(*Locks::custom_tls_lock);
619     PandaUniquePtr<CustomTLSData> tls_data(data);
620     auto it = custom_tls_cache_.find(key);
621     if (it == custom_tls_cache_.end()) {
622         custom_tls_cache_[key] = {PandaUniquePtr<CustomTLSData>()};
623     }
624     custom_tls_cache_[key].swap(tls_data);
625 }
626 
// Returns the language context of the VM this thread belongs to.
LanguageContext ManagedThread::GetLanguageContext()
{
    return GetVM()->GetLanguageContext();
}
631 
// Explicitly destroys MT-specific containers backed by the internal allocator, then
// delegates to the base class. Explicit destructor calls are used because the thread
// object itself is not destroyed here (see ~ManagedThread ownership notes).
void MTManagedThread::FreeInternalMemory()
{
    thread_frame_states_.~PandaStack<ThreadState>();
    local_objects_locked_.~PandaVector<LockedObjectInfo>();

    ManagedThread::FreeInternalMemory();
}
639 
// Releases everything this thread allocated from the internal allocator without
// destroying the thread object itself: containers are destructed in place, and the
// allocators/handle storages are deleted in an order mirroring ~ManagedThread.
void ManagedThread::FreeInternalMemory()
{
    local_objects_.~PandaVector<ObjectHeader **>();
    {
        os::memory::LockHolder lock(*Locks::custom_tls_lock);
        custom_tls_cache_.~PandaMap<const char *, PandaUniquePtr<CustomTLSData>>();
    }

    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(stack_frame_allocator_);
    allocator->Delete(internal_local_allocator_);

    {
        // PtThreadInfo teardown must run in managed scope.
        ScopedManagedCodeThread smt(MTManagedThread::GetCurrent());
        pt_thread_info_->Destroy();
    }
    allocator->Delete(pt_thread_info_.release());

    tagged_handle_scopes_.~PandaVector<HandleScope<coretypes::TaggedType> *>();
    allocator->Delete(tagged_handle_storage_);
    allocator->Delete(tagged_global_handle_storage_);

    allocator->Delete(object_header_handle_storage_);
    object_header_handle_scopes_.~PandaVector<HandleScope<ObjectHeader *> *>();
}
665 
PrintSuspensionStackIfNeeded()666 void ManagedThread::PrintSuspensionStackIfNeeded()
667 {
668     if (!Runtime::GetOptions().IsSafepointBacktrace()) {
669         return;
670     }
671     PandaStringStream out;
672     out << "Thread " << GetId() << " is suspended at\n";
673     PrintStack(out);
674     LOG(INFO, RUNTIME) << out.str();
675 }
676 
677 }  // namespace panda
678