/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/thread-inl.h"
#include "libpandabase/os/stacktrace.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/locks.h"
#include "runtime/include/object_header-inl.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/stack_walker.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/interpreter/runtime_interface.h"
#include "runtime/handle_scope-inl.h"
#include "runtime/mem/object_helpers.h"
#include "tooling/pt_thread_info.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/runslots_allocator-inl.h"

namespace panda {
using TaggedValue = coretypes::TaggedValue;
using TaggedType = coretypes::TaggedType;

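// zero_tlab is a shared zero-capacity TLAB: every thread points at it until a real TLAB is assigned
// (see Initialize(), UpdateTLAB() and ClearTLAB()).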
mem::TLAB *ManagedThread::zero_tlab = nullptr;
static const int MIN_PRIORITY = os::thread::LOWEST_PRIORITY;

static mem::InternalAllocatorPtr GetInternalAllocator(Thread *thread)
{
    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have HeapManager, so we get the internal allocator
    // from the runtime
    mem::HeapManager *heap_manager = thread->GetVM()->GetHeapManager();
    if (heap_manager != nullptr) {
        return heap_manager->GetInternalAllocator();
    }
    return Runtime::GetCurrent()->GetInternalAllocator();
}

MTManagedThread::ThreadId MTManagedThread::GetInternalId()
{
    ASSERT(internal_id_ != 0);
    return internal_id_;
}

Thread::~Thread()
{
    FreeAllocatedMemory();
}

void Thread::FreeInternalMemory()
{
    FreeAllocatedMemory();
}

void Thread::FreeAllocatedMemory()
{
    auto allocator = Runtime::GetCurrent()->GetInternalAllocator();
    ASSERT(allocator != nullptr);
    allocator->Delete(pre_buff_);
    allocator->Delete(g1_post_barrier_ring_buffer_);
    pre_buff_ = nullptr;
    g1_post_barrier_ring_buffer_ = nullptr;
}

Thread::Thread(PandaVM *vm, ThreadType thread_type) : vm_(vm), thread_type_(thread_type)
{
    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = vm->GetGC();
    if (gc != nullptr) {
        barrier_set_ = vm->GetGC()->GetBarrierSet();
        InitCardTableData(barrier_set_);
        if (barrier_set_->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
            auto addr = barrier_set_->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_PRE,
                                                        "CONCURRENT_MARKING_ADDR");
            concurrent_marking_addr_ = std::get<std::atomic<bool> *>(addr.GetValue());
        }
    }
}

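// Cache the post-barrier operands (card table address and minimum heap address) from the barrier set
// in thread-local fields, so the post write barrier can read them directly from the thread.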
void Thread::InitCardTableData(mem::GCBarrierSet *barrier)
{
    auto post_barrier_type = barrier->GetPostType();
    switch (post_barrier_type) {
        case panda::mem::BarrierType::POST_INTERGENERATIONAL_BARRIER:
            card_table_min_addr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            card_table_addr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            break;
        case panda::mem::BarrierType::POST_INTERREGION_BARRIER:
            card_table_addr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            card_table_min_addr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            // TODO(dtrubenkov): add REGION_SIZE_BITS
            break;
        case panda::mem::BarrierType::POST_WRB_NONE:
        case mem::POST_RB_NONE:
            break;
        case mem::PRE_WRB_NONE:
        case mem::PRE_RB_NONE:
        case mem::PRE_SATB_BARRIER:
            LOG(FATAL, RUNTIME) << "Post barrier expected";
            break;
    }
}

void Thread::InitPreBuff()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
        pre_buff_ = allocator->New<PandaVector<ObjectHeader *>>();
    }
}

/* static */
void ManagedThread::Initialize()
{
    ASSERT(!Thread::GetCurrent());
    ASSERT(!zero_tlab);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    zero_tlab = allocator->New<mem::TLAB>(nullptr, 0U);
}

/* static */
void ManagedThread::Shutdown()
{
    ASSERT(zero_tlab);
    ManagedThread::SetCurrent(nullptr);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(zero_tlab);
    zero_tlab = nullptr;
}

/* static */
void MTManagedThread::Yield()
{
    LOG(DEBUG, RUNTIME) << "Reschedule the execution of the current thread";
    os::thread::ThreadYield();
}

/* static - creation of the initial Managed thread */
ManagedThread *ManagedThread::Create(Runtime *runtime, PandaVM *vm, panda::panda_file::SourceLang thread_lang)
{
    trace::ScopedTrace scoped_trace("ManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new; we rely on it being accessible in child threads after
    // the runtime is destroyed
    return new ManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, Thread::ThreadType::THREAD_TYPE_MANAGED,
                             thread_lang);
}

/* static - creation of the initial MT Managed thread */
MTManagedThread *MTManagedThread::Create(Runtime *runtime, PandaVM *vm, panda::panda_file::SourceLang thread_lang)
{
    trace::ScopedTrace scoped_trace("MTManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new; we rely on it being accessible in child threads after
    // the runtime is destroyed
    auto thread = new MTManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, thread_lang);
    thread->ProcessCreatedThread();
    return thread;
}

ManagedThread::ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *panda_vm,
                             Thread::ThreadType thread_type, panda::panda_file::SourceLang thread_lang)
    : Thread(panda_vm, thread_type),
      id_(id),
      thread_lang_(thread_lang),
      pt_thread_info_(allocator->New<tooling::PtThreadInfo>()),
      thread_frame_states_(allocator->Adapter())
{
    ASSERT(zero_tlab != nullptr);
    tlab_ = zero_tlab;

    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = panda_vm->GetGC();
    if (gc != nullptr) {
        pre_barrier_type_ = gc->GetBarrierSet()->GetPreType();
        post_barrier_type_ = gc->GetBarrierSet()->GetPostType();
        auto barrier_set = gc->GetBarrierSet();
        if (barrier_set->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
            auto addr = barrier_set->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_PRE,
                                                       "CONCURRENT_MARKING_ADDR");
            concurrent_marking_addr_ = std::get<std::atomic<bool> *>(addr.GetValue());
            pre_buff_ = allocator->New<PandaVector<ObjectHeader *>>();
            // These must be initialized in the constructor because barriers can be executed between the
            // constructor and InitBuffers in InitializedClasses
            g1_post_barrier_ring_buffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }

    stack_frame_allocator_ =
        allocator->New<mem::StackFrameAllocator>(Runtime::GetOptions().UseMallocForInternalAllocations());
    internal_local_allocator_ =
        mem::InternalAllocator<>::SetUpLocalInternalAllocator(static_cast<mem::Allocator *>(allocator));
    tagged_handle_storage_ = allocator->New<HandleStorage<TaggedType>>(allocator);
    tagged_global_handle_storage_ = allocator->New<GlobalHandleStorage<TaggedType>>(allocator);
    object_header_handle_storage_ = allocator->New<HandleStorage<ObjectHeader *>>(allocator);
}

ManagedThread::~ManagedThread()
{
    // ManagedThread::ShutDown() may not be called when exiting js_thread, so we need to set current_thread = nullptr
    // NB! ThreadManager is expected to store finished threads in a separate list and the GC destroys them;
    // current_thread should be nullified in Destroy()
    // (zero_tlab == nullptr means that we destroyed Runtime and do not need to register the TLAB)
    if (zero_tlab != nullptr) {
        // We should register the TLAB size for MemStats during thread destruction.
        GetVM()->GetHeapManager()->RegisterTLAB(GetTLAB());
    }

    mem::InternalAllocatorPtr allocator = GetInternalAllocator(this);
    allocator->Delete(object_header_handle_storage_);
    allocator->Delete(tagged_global_handle_storage_);
    allocator->Delete(tagged_handle_storage_);
    mem::InternalAllocator<>::FinalizeLocalInternalAllocator(internal_local_allocator_,
                                                             static_cast<mem::Allocator *>(allocator));
    internal_local_allocator_ = nullptr;
    allocator->Delete(stack_frame_allocator_);
    allocator->Delete(pt_thread_info_.release());

    ASSERT(thread_frame_states_.empty() && "stack should be empty");
}

void ManagedThread::InitBuffers()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != panda::mem::BarrierType::PRE_WRB_NONE) {
        // We need to recreate the buffers if the thread was detached (all structures were removed) and then
        // attached again; skip the initialization on the first attach after the constructor
        if (pre_buff_ == nullptr) {
            ASSERT(pre_buff_ == nullptr);
            pre_buff_ = allocator->New<PandaVector<ObjectHeader *>>();
            ASSERT(g1_post_barrier_ring_buffer_ == nullptr);
            g1_post_barrier_ring_buffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }
}

NO_INLINE static uintptr_t GetStackTop()
{
    return ToUintPtr(__builtin_frame_address(0));
}

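// Touches the native stack page by page, recursing until the local buffer is within one page of end_addr;
// this makes the OS commit (load) the pages before they are protected or released.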
NO_INLINE static void LoadStackPages(uintptr_t end_addr)
{
    // ISO C++ forbids variable-length arrays and alloca is unsafe,
    // so we have to extend the stack step by step via recursive calls
    constexpr size_t margin = 512;
    constexpr size_t page_size = 4_KB;
    // NOLINTNEXTLINE(modernize-avoid-c-arrays)
    volatile uint8_t stack_buffer[page_size - margin];
    if (ToUintPtr(&(stack_buffer[0])) >= end_addr + page_size) {
        LoadStackPages(end_addr);
    }
    stack_buffer[0] = 0;
}

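// Native stack layout used for the overflow check (addresses grow upwards):
//   [stack_base, stack_base + guard_size)                - system guard region
//   [native_stack_begin_, begin + protected_size)        - pages protected by ProtectNativeStack()
//   [begin + protected_size, native_stack_end_)          - reserved margin
//   [native_stack_end_, stack_base + stack_size)         - regular usable stack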
void ManagedThread::InitForStackOverflowCheck(size_t native_stack_reserved_size, size_t native_stack_protected_size)
{
    void *stack_base = nullptr;
    size_t guard_size;
    size_t stack_size;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t reserved_size = 64_KB;
#else
    static constexpr size_t reserved_size = 8_KB;
#endif
    static_assert(STACK_OVERFLOW_RESERVED_SIZE == reserved_size);  // the compiler depends on this for the test load
    int error = os::thread::ThreadGetStackInfo(os::thread::GetNativeHandle(), &stack_base, &stack_size, &guard_size);
    if (error != 0) {
        LOG(ERROR, RUNTIME) << "InitForStackOverflowCheck: failed to get stack info, error = " << strerror(errno);
        return;
    }
    if (guard_size < panda::os::mem::GetPageSize()) {
        guard_size = panda::os::mem::GetPageSize();
    }
    if (stack_size <= native_stack_reserved_size + native_stack_protected_size + guard_size) {
        LOG(ERROR, RUNTIME) << "InitForStackOverflowCheck: stack size is not enough, stack_base = " << stack_base
                            << ", stack_size = " << stack_size << ", guard_size = " << guard_size;
        return;
    }
    LOG(DEBUG, RUNTIME) << "InitForStackOverflowCheck: stack_base = " << stack_base << ", stack_size = " << stack_size
                        << ", guard_size = " << guard_size;
    native_stack_begin_ = ToUintPtr(stack_base) + guard_size;
    native_stack_end_ = native_stack_begin_ + native_stack_protected_size + native_stack_reserved_size;
    native_stack_reserved_size_ = native_stack_reserved_size;
    native_stack_protected_size_ = native_stack_protected_size;
    native_stack_guard_size_ = guard_size;
    native_stack_size_ = stack_size;
    iframe_stack_size_ = stack_size;  // initialize the frame stack size to be the same as the native stack size
    ProtectNativeStack();
}

void ManagedThread::ProtectNativeStack()
{
    if (native_stack_protected_size_ == 0) {
        return;
    }

    // Try to mprotect directly
    if (!panda::os::mem::MakeMemProtected(ToVoidPtr(native_stack_begin_), native_stack_protected_size_)) {
        return;
    }

    // If mprotect failed, try to load the stack pages and then retry mprotect
    uintptr_t native_stack_top = AlignDown(GetStackTop(), panda::os::mem::GetPageSize());
    LOG(DEBUG, RUNTIME) << "ProtectNativeStack: try to load pages, mprotect error = " << strerror(errno)
                        << ", stack_begin = " << native_stack_begin_ << ", stack_top = " << native_stack_top
                        << ", stack_size = " << native_stack_size_ << ", guard_size = " << native_stack_guard_size_;
    if (native_stack_size_ > STACK_MAX_SIZE_OVERFLOW_CHECK || native_stack_end_ >= native_stack_top ||
        native_stack_top > native_stack_end_ + STACK_MAX_SIZE_OVERFLOW_CHECK) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: stack is too large, mprotect error = " << strerror(errno)
                            << ", max_stack_size = " << STACK_MAX_SIZE_OVERFLOW_CHECK
                            << ", stack_begin = " << native_stack_begin_ << ", stack_top = " << native_stack_top
                            << ", stack_size = " << native_stack_size_ << ", guard_size = " << native_stack_guard_size_;
        return;
    }
    LoadStackPages(native_stack_begin_);
    if (panda::os::mem::MakeMemProtected(ToVoidPtr(native_stack_begin_), native_stack_protected_size_)) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: failed to protect pages, error = " << strerror(errno)
                            << ", stack_begin = " << native_stack_begin_ << ", stack_top = " << native_stack_top
                            << ", stack_size = " << native_stack_size_ << ", guard_size = " << native_stack_guard_size_;
    }
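    // The pages below the current stack top were only touched to make mprotect succeed; release them,
    // keeping one page of headroom below the aligned stack top.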
    size_t release_size = native_stack_top - native_stack_begin_ - panda::os::mem::GetPageSize();
    if (panda::os::mem::ReleasePages(native_stack_begin_, native_stack_begin_ + release_size) != 0) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: failed to release pages, error = " << strerror(errno)
                            << ", stack_begin = " << native_stack_begin_ << ", stack_top = " << native_stack_top
                            << ", stack_size = " << native_stack_size_ << ", guard_size = " << native_stack_guard_size_
                            << ", release_size = " << release_size;
    }
}

void ManagedThread::DisableStackOverflowCheck()
{
    native_stack_end_ = native_stack_begin_;
    iframe_stack_size_ = std::numeric_limits<size_t>::max();
    if (native_stack_protected_size_ > 0) {
        panda::os::mem::MakeMemReadWrite(ToVoidPtr(native_stack_begin_), native_stack_protected_size_);
    }
}

void ManagedThread::EnableStackOverflowCheck()
{
    native_stack_end_ = native_stack_begin_ + native_stack_protected_size_ + native_stack_reserved_size_;
    iframe_stack_size_ = native_stack_size_;
    if (native_stack_protected_size_ > 0) {
        panda::os::mem::MakeMemProtected(ToVoidPtr(native_stack_begin_), native_stack_protected_size_);
    }
}

// NO_THREAD_SAFETY_ANALYSIS because TSAN is not able to determine the lock status
void ManagedThread::SuspendCheck() NO_THREAD_SAFETY_ANALYSIS
{
    // We should use internal suspension to avoid missing a call to IncSuspend
    SuspendImpl(true);
    Locks::mutator_lock->Unlock();
    Locks::mutator_lock->ReadLock();
    ResumeImpl(true);
}

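// Suspension is counted: the SUSPEND_REQUEST flag is set on the 0 -> 1 transition of suspend_count_ and
// cleared again when ResumeImpl() drops the count back to 0. user_code_suspend_count_ additionally tracks
// suspensions requested by user code (internal_suspend/internal_resume == false).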
void ManagedThread::SuspendImpl(bool internal_suspend)
{
    os::memory::LockHolder lock(suspend_lock_);
    LOG(DEBUG, RUNTIME) << "Suspending thread " << GetId();
    if (!internal_suspend) {
        if (IsUserSuspended()) {
            LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already suspended";
            return;
        }
        user_code_suspend_count_++;
    }
    auto old_count = suspend_count_++;
    if (old_count == 0) {
        SetFlag(SUSPEND_REQUEST);
    }
}

void ManagedThread::ResumeImpl(bool internal_resume)
{
    os::memory::LockHolder lock(suspend_lock_);
    LOG(DEBUG, RUNTIME) << "Resuming thread " << GetId();
    if (!internal_resume) {
        if (!IsUserSuspended()) {
            LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already resumed";
            return;
        }
        ASSERT(user_code_suspend_count_ != 0);
        user_code_suspend_count_--;
    }
    if (suspend_count_ > 0) {
        suspend_count_--;
        if (suspend_count_ == 0) {
            ClearFlag(SUSPEND_REQUEST);
        }
    }
    // Help for UnregisterExitedThread
    TSAN_ANNOTATE_HAPPENS_BEFORE(&fts_);
    suspend_var_.Signal();
}

void ManagedThread::SafepointPoll()
{
    if (this->TestAllFlags()) {
        trace::ScopedTrace scoped_trace("RunSafepoint");
        panda::interpreter::RuntimeInterface::Safepoint();
    }
}

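// thread_frame_states_ records the alternation of native and managed frames on this thread;
// two consecutive NATIVE_CODE entries are not allowed (see HasManagedCodeOnStack()).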
void ManagedThread::NativeCodeBegin()
{
    LOG_IF(!(thread_frame_states_.empty() || thread_frame_states_.top() != NATIVE_CODE), FATAL, RUNTIME)
        << LogThreadStack(NATIVE_CODE) << " or stack should be empty";
    thread_frame_states_.push(NATIVE_CODE);
    UpdateStatus(ThreadStatus::NATIVE);
    is_managed_scope_ = false;
}

void ManagedThread::NativeCodeEnd()
{
    // thread_frame_states_ should not be accessed without the MutatorLock (as the runtime could have been destroyed)
    // If this was the last frame, it should have been called from Destroy(), which should UpdateStatus to FINISHED
    // after this method
    UpdateStatus(ThreadStatus::RUNNING);
    is_managed_scope_ = true;
    LOG_IF(thread_frame_states_.empty(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(thread_frame_states_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(NATIVE_CODE);
    thread_frame_states_.pop();
}

bool ManagedThread::IsInNativeCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    return thread_frame_states_.top() == NATIVE_CODE;
}

void ManagedThread::ManagedCodeBegin()
{
    // thread_frame_states_ should not be accessed without the MutatorLock (as the runtime could have been destroyed)
    UpdateStatus(ThreadStatus::RUNNING);
    is_managed_scope_ = true;
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(thread_frame_states_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    thread_frame_states_.push(MANAGED_CODE);
}

void ManagedThread::ManagedCodeEnd()
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(thread_frame_states_.top() != MANAGED_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    thread_frame_states_.pop();
    // Should be NATIVE_CODE
    UpdateStatus(ThreadStatus::NATIVE);
    is_managed_scope_ = false;
}

bool ManagedThread::IsManagedCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    return thread_frame_states_.top() == MANAGED_CODE;
}

// Since we don't allow two consecutive NativeCode frames, there is no managed code on the stack if
// its size is 1 and the last frame is Native
bool ManagedThread::HasManagedCodeOnStack() const
{
    if (HasClearStack()) {
        return false;
    }
    if (thread_frame_states_.size() == 1 && IsInNativeCode()) {
        return false;
    }
    return true;
}

bool ManagedThread::HasClearStack() const
{
    return thread_frame_states_.empty();
}

PandaString ManagedThread::ThreadStatusAsString(enum ThreadStatus status)
{
    switch (status) {
        case ThreadStatus::CREATED:
            return "New";
        case ThreadStatus::RUNNING:
            return "Runnable";
        case ThreadStatus::IS_BLOCKED:
            return "Blocked";
        case ThreadStatus::IS_WAITING:
            return "Waiting";
        case ThreadStatus::IS_TIMED_WAITING:
            return "Timed_waiting";
        case ThreadStatus::IS_SUSPENDED:
            return "Suspended";
        case ThreadStatus::IS_COMPILER_WAITING:
            return "Compiler_waiting";
        case ThreadStatus::IS_WAITING_INFLATION:
            return "Waiting_inflation";
        case ThreadStatus::IS_SLEEPING:
            return "Sleeping";
        case ThreadStatus::IS_TERMINATED_LOOP:
            return "Terminated_loop";
        case ThreadStatus::TERMINATING:
            return "Terminating";
        case ThreadStatus::NATIVE:
            return "Native";
        case ThreadStatus::FINISHED:
            return "Terminated";
        default:
            return "unknown";
    }
}

PandaString ManagedThread::LogThreadStack(ThreadState new_state) const
{
    PandaStringStream debug_message;
    static std::unordered_map<ThreadState, std::string> thread_state_to_string_map = {
        {ThreadState::NATIVE_CODE, "NATIVE_CODE"}, {ThreadState::MANAGED_CODE, "MANAGED_CODE"}};
    auto new_state_it = thread_state_to_string_map.find(new_state);
    auto top_frame_it = thread_state_to_string_map.find(thread_frame_states_.top());
    ASSERT(new_state_it != thread_state_to_string_map.end());
    ASSERT(top_frame_it != thread_state_to_string_map.end());

    debug_message << "threadId: " << GetId() << " "
                  << "tried to go to " << new_state_it->second << " state, but the last frame is: "
                  << top_frame_it->second << ", " << thread_frame_states_.size()
                  << " frames in stack (from top to bottom): [";

    PandaStack<ThreadState> copy_stack(thread_frame_states_);
    while (!copy_stack.empty()) {
        auto it = thread_state_to_string_map.find(copy_stack.top());
        ASSERT(it != thread_state_to_string_map.end());
        debug_message << it->second;
        if (copy_stack.size() > 1) {
            debug_message << "|";
        }
        copy_stack.pop();
    }
    debug_message << "]";
    return debug_message.str();
}

MTManagedThread::MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *panda_vm,
                                 panda::panda_file::SourceLang thread_lang)
    : ManagedThread(id, allocator, panda_vm, Thread::ThreadType::THREAD_TYPE_MT_MANAGED, thread_lang),
      waiting_monitor_(nullptr),
      entering_monitor_(nullptr)
{
    ASSERT(panda_vm != nullptr);
    internal_id_ = GetVM()->GetThreadManager()->GetInternalThreadId();

    auto ext = Runtime::GetCurrent()->GetClassLinker()->GetExtension(GetThreadLang());
    if (ext != nullptr) {
        string_class_ptr_ = ext->GetClassRoot(ClassRoot::STRING);
    }

    auto *rs = allocator->New<mem::ReferenceStorage>(panda_vm->GetGlobalObjectStorage(), allocator, false);
    LOG_IF((rs == nullptr || !rs->Init()), FATAL, RUNTIME) << "Cannot create pt reference storage";
    pt_reference_storage_ = PandaUniquePtr<mem::ReferenceStorage>(rs);
}

MTManagedThread::~MTManagedThread()
{
    ASSERT(internal_id_ != 0);
    GetVM()->GetThreadManager()->RemoveInternalThreadId(internal_id_);
}

void ManagedThread::PushLocalObject(ObjectHeader **object_header)
{
#ifdef PANDA_WITH_ECMASCRIPT
    // Object handles can be created during class initialization, so check lock state only after GC is started.
    ASSERT(!ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (Locks::mutator_lock->GetState() != MutatorLock::MutatorLockState::UNLOCKED) ||
           this->GetThreadLang() == panda::panda_file::SourceLang::ECMASCRIPT);
#else
    ASSERT(!ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (Locks::mutator_lock->GetState() != MutatorLock::MutatorLockState::UNLOCKED));
#endif

    local_objects_.push_back(object_header);
    LOG(DEBUG, GC) << "PushLocalObject for thread " << std::hex << this << ", obj = " << *object_header;
}

void ManagedThread::PopLocalObject()
{
#ifdef PANDA_WITH_ECMASCRIPT
    // Object handles can be created during class initialization, so check lock state only after GC is started.
    ASSERT(!ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (Locks::mutator_lock->GetState() != MutatorLock::MutatorLockState::UNLOCKED) ||
           this->GetThreadLang() == panda::panda_file::SourceLang::ECMASCRIPT);
#else
    ASSERT(!ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (Locks::mutator_lock->GetState() != MutatorLock::MutatorLockState::UNLOCKED));
#endif

    ASSERT(!local_objects_.empty());
    LOG(DEBUG, GC) << "PopLocalObject from thread " << std::hex << this << ", obj = " << *local_objects_.back();
    local_objects_.pop_back();
}

void MTManagedThread::PushLocalObjectLocked(ObjectHeader *obj)
{
    local_objects_locked_.emplace_back(obj, GetFrame());
}

void MTManagedThread::PopLocalObjectLocked([[maybe_unused]] ObjectHeader *out)
{
    if (LIKELY(!local_objects_locked_.empty())) {
#ifndef NDEBUG
        ObjectHeader *obj = local_objects_locked_.back().GetObject();
        if (obj != out) {
            LOG(WARNING, RUNTIME) << "Locked object is not paired";
        }
#endif  // !NDEBUG
        local_objects_locked_.pop_back();
    } else {
        LOG(WARNING, RUNTIME) << "PopLocalObjectLocked failed, current thread locked object is empty";
    }
}

Span<LockedObjectInfo> MTManagedThread::GetLockedObjectInfos()
{
    return local_objects_locked_.data();
}

void ManagedThread::UpdateTLAB(mem::TLAB *tlab)
{
    ASSERT(tlab_ != nullptr);
    ASSERT(tlab != nullptr);
    tlab_ = tlab;
}

void ManagedThread::ClearTLAB()
{
    ASSERT(zero_tlab != nullptr);
    tlab_ = zero_tlab;
}

/* Common actions for creation of the thread. */
void MTManagedThread::ProcessCreatedThread()
{
    ManagedThread::SetCurrent(this);
    // Runtime takes ownership of the thread
    trace::ScopedTrace scoped_trace2("ThreadManager::RegisterThread");
    GetVM()->GetThreadManager()->RegisterThread(this);
    NativeCodeBegin();
}

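// Called after a moving GC: every raw object pointer held by the thread (pending exception, local objects,
// handle storages) is updated to its forwarded address.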
void ManagedThread::UpdateGCRoots()
{
    if ((exception_ != nullptr) && (exception_->IsForwarded())) {
        exception_ = ::panda::mem::GetForwardAddress(exception_);
    }
    for (auto &&it : local_objects_) {
        if ((*it)->IsForwarded()) {
            (*it) = ::panda::mem::GetForwardAddress(*it);
        }
    }

    if (!tagged_handle_scopes_.empty()) {
        tagged_handle_storage_->UpdateHeapObject();
        tagged_global_handle_storage_->UpdateHeapObject();
    }

    if (!object_header_handle_scopes_.empty()) {
        object_header_handle_storage_->UpdateHeapObject();
    }
}

/* return true if sleep is interrupted */
bool MTManagedThread::Sleep(uint64_t ms)
{
    auto thread = MTManagedThread::GetCurrent();
    bool is_interrupted = thread->IsInterrupted();
    if (!is_interrupted) {
        thread->TimedWait(ThreadStatus::IS_SLEEPING, ms, 0);
        is_interrupted = thread->IsInterrupted();
    }
    return is_interrupted;
}

void ManagedThread::SetThreadPriority(int32_t prio)
{
    ThreadId tid = GetId();
    int res = os::thread::SetPriority(tid, prio);
    if (!os::thread::IsSetPriorityError(res)) {
        LOG(DEBUG, RUNTIME) << "Successfully changed priority for thread " << tid << " to " << prio;
    } else {
        LOG(DEBUG, RUNTIME) << "Cannot change priority for thread " << tid << " to " << prio;
    }
}

uint32_t ManagedThread::GetThreadPriority()
{
    ThreadId tid = GetId();
    return os::thread::GetPriority(tid);
}

void MTManagedThread::UpdateGCRoots()
{
    ManagedThread::UpdateGCRoots();
    for (auto &it : local_objects_locked_.data()) {
        if (it.GetObject()->IsForwarded()) {
            it.SetObject(panda::mem::GetForwardAddress(it.GetObject()));
        }
    }

    // Update enter_monitor_object_
    if (enter_monitor_object_ != nullptr && enter_monitor_object_->IsForwarded()) {
        enter_monitor_object_ = panda::mem::GetForwardAddress(enter_monitor_object_);
    }

    pt_reference_storage_->UpdateMovedRefs();
}

void MTManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    ManagedThread::VisitGCRoots(cb);

    // Visit enter_monitor_object_
    if (enter_monitor_object_ != nullptr) {
        cb(enter_monitor_object_);
    }

    pt_reference_storage_->VisitObjects([&cb](const mem::GCRoot &gc_root) { cb(gc_root.GetObjectHeader()); },
                                        mem::RootType::ROOT_PT_LOCAL);
}

void MTManagedThread::SetDaemon()
{
    is_daemon_ = true;
    GetVM()->GetThreadManager()->AddDaemonThread();
    SetThreadPriority(MIN_PRIORITY);
}

void MTManagedThread::Interrupt(MTManagedThread *thread)
{
    os::memory::LockHolder lock(thread->cond_lock_);
    LOG(DEBUG, RUNTIME) << "Interrupt a thread " << thread->GetId();
    thread->SetInterruptedWithLockHeld(true);
    thread->SignalWithLockHeld();
    thread->InterruptPostImpl();
}

bool MTManagedThread::Interrupted()
{
    os::memory::LockHolder lock(cond_lock_);
    bool res = IsInterruptedWithLockHeld();
    SetInterruptedWithLockHeld(false);
    return res;
}

void MTManagedThread::StopDaemonThread()
{
    SetRuntimeTerminated();
    MTManagedThread::Interrupt(this);
}

void ManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    if (exception_ != nullptr) {
        cb(exception_);
    }
    for (auto it : local_objects_) {
        cb(*it);
    }

    if (!tagged_handle_scopes_.empty()) {
        tagged_handle_storage_->VisitGCRoots(cb);
        tagged_global_handle_storage_->VisitGCRoots(cb);
    }
    if (!object_header_handle_scopes_.empty()) {
        object_header_handle_storage_->VisitGCRoots(cb);
    }
}

void MTManagedThread::Destroy()
{
    ASSERT(this == ManagedThread::GetCurrent());
    ASSERT(GetStatus() != ThreadStatus::FINISHED);

    UpdateStatus(ThreadStatus::TERMINATING);  // Set this status to prevent the runtime from destroying itself
                                              // while this NATIVE thread is trying to acquire the runtime.
    ReleaseMonitors();
    if (!IsDaemon()) {
        Runtime *runtime = Runtime::GetCurrent();
        runtime->GetNotificationManager()->ThreadEndEvent(this);
    }

    if (GetVM()->GetThreadManager()->UnregisterExitedThread(this)) {
        // Clear current_thread only if the unregistration was successful
        ManagedThread::SetCurrent(nullptr);
    }
}

CustomTLSData *ManagedThread::GetCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::custom_tls_lock);
    auto it = custom_tls_cache_.find(key);
    if (it == custom_tls_cache_.end()) {
        return nullptr;
    }
    return it->second.get();
}

void ManagedThread::SetCustomTLSData(const char *key, CustomTLSData *data)
{
    os::memory::LockHolder lock(*Locks::custom_tls_lock);
    PandaUniquePtr<CustomTLSData> tls_data(data);
    auto it = custom_tls_cache_.find(key);
    if (it == custom_tls_cache_.end()) {
        custom_tls_cache_[key] = {PandaUniquePtr<CustomTLSData>()};
    }
    custom_tls_cache_[key].swap(tls_data);
}

bool ManagedThread::EraseCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::custom_tls_lock);
    return custom_tls_cache_.erase(key) != 0;
}

LanguageContext ManagedThread::GetLanguageContext()
{
    return Runtime::GetCurrent()->GetLanguageContext(thread_lang_);
}

void MTManagedThread::FreeInternalMemory()
{
    local_objects_locked_.~LockedObjectList<>();
    pt_reference_storage_.reset();

    ManagedThread::FreeInternalMemory();
}

void ManagedThread::DestroyInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this);
    ASSERT(pre_buff_ == nullptr);
    ASSERT(g1_post_barrier_ring_buffer_ == nullptr);
    pt_thread_info_->Destroy();
}

void ManagedThread::FreeInternalMemory()
{
    thread_frame_states_.~PandaStack<ThreadState>();
    DestroyInternalResources();

    local_objects_.~PandaVector<ObjectHeader **>();
    {
        os::memory::LockHolder lock(*Locks::custom_tls_lock);
        custom_tls_cache_.~PandaMap<const char *, PandaUniquePtr<CustomTLSData>>();
    }

    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(stack_frame_allocator_);
    allocator->Delete(internal_local_allocator_);

    allocator->Delete(pt_thread_info_.release());

    tagged_handle_scopes_.~PandaVector<HandleScope<coretypes::TaggedType> *>();
    allocator->Delete(tagged_handle_storage_);
    allocator->Delete(tagged_global_handle_storage_);

    allocator->Delete(object_header_handle_storage_);
    object_header_handle_scopes_.~PandaVector<HandleScope<ObjectHeader *> *>();

    Thread::FreeInternalMemory();
}

void ManagedThread::PrintSuspensionStackIfNeeded()
{
    if (!Runtime::GetOptions().IsSafepointBacktrace()) {
        return;
    }
    PandaStringStream out;
    out << "Thread " << GetId() << " is suspended at\n";
    PrintStack(out);
    LOG(INFO, RUNTIME) << out.str();
}

}  // namespace panda