/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/thread-inl.h"
#include "libpandabase/os/stacktrace.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/locks.h"
#include "runtime/include/object_header-inl.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/stack_walker.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/interpreter/runtime_interface.h"
#include "runtime/handle_scope-inl.h"
#include "runtime/mem/object_helpers.h"
#include "tooling/pt_thread_info.h"
#include "runtime/mem/runslots_allocator-inl.h"

namespace ark {
using TaggedValue = coretypes::TaggedValue;
using TaggedType = coretypes::TaggedType;

mem::TLAB *ManagedThread::zeroTlab_ = nullptr;
static const int MIN_PRIORITY = os::thread::LOWEST_PRIORITY;

static mem::InternalAllocatorPtr GetInternalAllocator(Thread *thread)
{
    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have HeapManager, so we get internal allocator from
    // runtime
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    if (heapManager != nullptr) {
        return heapManager->GetInternalAllocator();
    }
    return Runtime::GetCurrent()->GetInternalAllocator();
}

MTManagedThread::ThreadId MTManagedThread::GetInternalId()
{
    ASSERT(internalId_ != 0);
    return internalId_;
}

Thread::~Thread()
{
    FreeAllocatedMemory();
}

void Thread::FreeInternalMemory()
{
    FreeAllocatedMemory();
}

void Thread::FreeAllocatedMemory()
{
    auto allocator = Runtime::GetCurrent()->GetInternalAllocator();
    ASSERT(allocator != nullptr);
    allocator->Delete(preBuff_);
    preBuff_ = nullptr;

#ifdef PANDA_USE_CUSTOM_SIGNAL_STACK
    allocator->Free(signalStack_.ss_sp);
#endif
}

Thread::Thread(PandaVM *vm, ThreadType threadType)
    : ThreadProxy(vm->GetMutatorLock()), vm_(vm), threadType_(threadType)
{
    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = vm->GetGC();
    if (gc != nullptr) {
        barrierSet_ = vm->GetGC()->GetBarrierSet();
        InitCardTableData(barrierSet_);
    }
    InitializeThreadFlag();

#ifdef PANDA_USE_CUSTOM_SIGNAL_STACK
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    signalStack_.ss_sp = allocator->Alloc(SIGSTKSZ * 8U);
    signalStack_.ss_size = SIGSTKSZ * 8U;
    signalStack_.ss_flags = 0;
    sigaltstack(&signalStack_, nullptr);
#endif
}

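// InitCardTableData caches the barrier operands (card table address and minimal heap address) and the
// matching post-write-barrier entry points on the thread itself, presumably so that write-barrier fast
// paths can use thread-local data instead of querying the barrier set on every store.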
void Thread::InitCardTableData(mem::GCBarrierSet *barrier)
{
    auto postBarrierType = barrier->GetPostType();
    switch (postBarrierType) {
        case ark::mem::BarrierType::POST_INTERGENERATIONAL_BARRIER:
            cardTableMinAddr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            cardTableAddr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            postWrbOneObject_ = reinterpret_cast<void *>(PostInterGenerationalBarrier1);
            postWrbTwoObjects_ = reinterpret_cast<void *>(PostInterGenerationalBarrier2);
            break;
        case ark::mem::BarrierType::POST_INTERREGION_BARRIER:
            cardTableAddr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            cardTableMinAddr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            postWrbOneObject_ = reinterpret_cast<void *>(PostInterRegionBarrierMarkSingleFast);
            postWrbTwoObjects_ = reinterpret_cast<void *>(PostInterRegionBarrierMarkPairFast);
            break;
        case ark::mem::BarrierType::POST_WRB_NONE:
            postWrbOneObject_ = reinterpret_cast<void *>(EmptyPostWriteBarrier);
            postWrbTwoObjects_ = reinterpret_cast<void *>(EmptyPostWriteBarrier);
            break;
        case mem::POST_RB_NONE:
            break;
        case mem::PRE_WRB_NONE:
        case mem::PRE_RB_NONE:
        case mem::PRE_SATB_BARRIER:
            LOG(FATAL, RUNTIME) << "Post barrier expected";
            break;
    }
}

void Thread::InitPreBuff()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != ark::mem::BarrierType::PRE_WRB_NONE) {
        preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
    }
}

/* static */
void ManagedThread::Initialize()
{
    ASSERT(!Thread::GetCurrent());
    ASSERT(!zeroTlab_);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    zeroTlab_ = allocator->New<mem::TLAB>(nullptr, 0U);
    InitializeInitThreadFlag();
}

/* static */
void ManagedThread::Shutdown()
{
    ASSERT(zeroTlab_);
    ManagedThread::SetCurrent(nullptr);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(zeroTlab_);
    zeroTlab_ = nullptr;
    /* @sync 1
     * @description: Runtime is terminated at this point and we cannot create new threads
     * */
}

/* static */
void MTManagedThread::Yield()
{
    LOG(DEBUG, RUNTIME) << "Reschedule the execution of the current thread";
    os::thread::Yield();
}

/* static - creation of the initial Managed thread */
ManagedThread *ManagedThread::Create(Runtime *runtime, PandaVM *vm, ark::panda_file::SourceLang threadLang)
{
    trace::ScopedTrace scopedTrace("ManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new: we rely on this structure being accessible in child threads after
    // the runtime is destroyed
    return new ManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, Thread::ThreadType::THREAD_TYPE_MANAGED,
                             threadLang);
}

/* static - creation of the initial MT Managed thread */
MTManagedThread *MTManagedThread::Create(Runtime *runtime, PandaVM *vm, ark::panda_file::SourceLang threadLang)
{
    trace::ScopedTrace scopedTrace("MTManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new: we rely on this structure being accessible in child threads after
    // the runtime is destroyed
    auto thread = new MTManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, threadLang);
    thread->ProcessCreatedThread();

    runtime->GetNotificationManager()->ThreadStartEvent(thread);

    return thread;
}

ManagedThread::ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *pandaVm,
                             Thread::ThreadType threadType, ark::panda_file::SourceLang threadLang)
    : Thread(pandaVm, threadType),
      id_(id),
      threadLang_(threadLang),
      ptThreadInfo_(allocator->New<tooling::PtThreadInfo>()),
      threadFrameStates_(allocator->Adapter())
{
    ASSERT(zeroTlab_ != nullptr);
    tlab_ = zeroTlab_;

    // WORKAROUND(v.cherkashin): EcmaScript side build doesn't have GC, so we skip setting barriers for this case
    mem::GC *gc = pandaVm->GetGC();
    if (gc != nullptr) {
        preBarrierType_ = gc->GetBarrierSet()->GetPreType();
        postBarrierType_ = gc->GetBarrierSet()->GetPostType();
        auto barrierSet = gc->GetBarrierSet();
        if (barrierSet->GetPreType() != ark::mem::BarrierType::PRE_WRB_NONE) {
            preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
            // This needs to be initialized in the constructor because barriers can run between the constructor
            // and InitBuffers in InitializedClasses
            g1PostBarrierRingBuffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }

    stackFrameAllocator_ =
        allocator->New<mem::StackFrameAllocator>(Runtime::GetOptions().UseMallocForInternalAllocations());
    internalLocalAllocator_ =
        mem::InternalAllocator<>::SetUpLocalInternalAllocator(static_cast<mem::Allocator *>(allocator));
    taggedHandleStorage_ = allocator->New<HandleStorage<TaggedType>>(allocator);
    taggedGlobalHandleStorage_ = allocator->New<GlobalHandleStorage<TaggedType>>(allocator);
    objectHeaderHandleStorage_ = allocator->New<HandleStorage<ObjectHeader *>>(allocator);
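    // Adaptive TLAB sizing: a weighted average of TLAB fill rates steers the next TLAB size between
    // init-tlab-size and max-tlab-size; the constants below are heuristic tuning parameters.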
    if (Runtime::GetOptions().IsAdaptiveTlabSize()) {
        constexpr size_t MAX_GROW_RATIO = 2;
        constexpr float WEIGHT = 0.5;
        constexpr float DESIRED_FILL_FRACTION = 0.9;
        size_t initTlabSize = Runtime::GetOptions().GetInitTlabSize();
        size_t maxTlabSize = Runtime::GetOptions().GetMaxTlabSize();
        if (initTlabSize < 4_KB) {
            LOG(FATAL, RUNTIME) << "Initial TLAB size must be at least 4KB";
        }
        if (initTlabSize > maxTlabSize) {
            LOG(FATAL, RUNTIME) << "Initial TLAB size must be less than or equal to max TLAB size";
        }
        weightedAdaptiveTlabAverage_ = allocator->New<WeightedAdaptiveTlabAverage>(
            initTlabSize, maxTlabSize, MAX_GROW_RATIO, WEIGHT, DESIRED_FILL_FRACTION);
    }
}

ManagedThread::~ManagedThread()
{
    // ManagedThread::Shutdown() may not be called when a js_thread exits, so current_thread must be set to nullptr.
    // NB! ThreadManager is expected to store finished threads in a separate list and GC destroys them;
    // current_thread should be nullified in Destroy().
    // We should register the TLAB size for MemStats during thread destruction.
    // (zero_tlab == nullptr means that we destroyed Runtime and do not need to register the TLAB)
    if (zeroTlab_ != nullptr) {
        ASSERT(tlab_ == zeroTlab_);
    }

    mem::InternalAllocatorPtr allocator = GetInternalAllocator(this);
    allocator->Delete(objectHeaderHandleStorage_);
    allocator->Delete(taggedGlobalHandleStorage_);
    allocator->Delete(taggedHandleStorage_);
    allocator->Delete(weightedAdaptiveTlabAverage_);
    mem::InternalAllocator<>::FinalizeLocalInternalAllocator(internalLocalAllocator_,
                                                             static_cast<mem::Allocator *>(allocator));
    internalLocalAllocator_ = nullptr;
    allocator->Delete(stackFrameAllocator_);
    allocator->Delete(ptThreadInfo_.release());

    ASSERT(threadFrameStates_.empty() && "stack should be empty");
}

void ManagedThread::InitBuffers()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != ark::mem::BarrierType::PRE_WRB_NONE) {
        // We need to recreate the buffers if the thread was detached (all structures were removed) and then
        // attached again; skip initialization on the first attach after the constructor
        if (preBuff_ == nullptr) {
            preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
            ASSERT(g1PostBarrierRingBuffer_ == nullptr);
            g1PostBarrierRingBuffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }
}

NO_INLINE static uintptr_t GetStackTop()
{
    return ToUintPtr(__builtin_frame_address(0));
}

NO_INLINE static void LoadStackPages(uintptr_t endAddr)
{
    // ISO C++ forbids variable-length arrays and alloca is unsafe,
    // so we have to extend the stack step by step via recursive calls
    constexpr size_t MARGIN = 512;
    constexpr size_t STACK_PAGE_SIZE = 4_KB;
    // NOLINTNEXTLINE(modernize-avoid-c-arrays)
    volatile uint8_t stackBuffer[STACK_PAGE_SIZE - MARGIN];
    if (ToUintPtr(&(stackBuffer[0])) >= endAddr + STACK_PAGE_SIZE) {
        LoadStackPages(endAddr);
    }
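    // Touch the buffer so this stack page is actually committed; the buffer is volatile, so the compiler
    // cannot optimize the store (and the buffer itself) away.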
    stackBuffer[0] = 0;
}

bool ManagedThread::RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize)
{
    int error = os::thread::ThreadGetStackInfo(os::thread::GetNativeHandle(), &stackAddr, &stackSize, &guardSize);
    if (error != 0) {
        LOG(ERROR, RUNTIME) << "RetrieveStackInfo: failed to get stack info, error = " << strerror(errno);
    }
    return error == 0;
}

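// Expected native stack layout, from low to high addresses:
//   [guard pages][protected region][reserved region][usable stack ...]
// nativeStackBegin_ points just above the guard pages and nativeStackEnd_ just above the reserved region;
// the reserved region presumably leaves headroom for the code that handles a detected stack overflow.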
void ManagedThread::InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize)
{
    void *stackBase = nullptr;
    size_t guardSize;
    size_t stackSize;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t RESERVED_SIZE = 12_KB;
#endif
    static_assert(STACK_OVERFLOW_RESERVED_SIZE == RESERVED_SIZE); // the compiler depends on this for its test load!!!
    if (!RetrieveStackInfo(stackBase, stackSize, guardSize)) {
        return;
    }
    if (guardSize < ark::os::mem::GetPageSize()) {
        guardSize = ark::os::mem::GetPageSize();
    }
    if (stackSize <= nativeStackReservedSize + nativeStackProtectedSize + guardSize) {
        LOG(ERROR, RUNTIME) << "InitForStackOverflowCheck: stack size not enough, stack_base = " << stackBase
                            << ", stack_size = " << stackSize << ", guard_size = " << guardSize;
        return;
    }
    LOG(DEBUG, RUNTIME) << "InitForStackOverflowCheck: stack_base = " << stackBase << ", stack_size = " << stackSize
                        << ", guard_size = " << guardSize;
    nativeStackBegin_ = ToUintPtr(stackBase) + guardSize;
    nativeStackEnd_ = nativeStackBegin_ + nativeStackProtectedSize + nativeStackReservedSize;
    nativeStackReservedSize_ = nativeStackReservedSize;
    nativeStackProtectedSize_ = nativeStackProtectedSize;
    nativeStackGuardSize_ = guardSize;
    nativeStackSize_ = stackSize;
    // Initialize the interpreter frame stack size from the native stack size (the *4 factor is just a heuristic
    // to pass some tests), but the frame stack size cannot exceed the max memory size of the frame allocator
    auto iframeStackSize = stackSize * 4;
    auto allocatorMaxSize = stackFrameAllocator_->GetFullMemorySize();
    iframeStackSize_ = iframeStackSize <= allocatorMaxSize ? iframeStackSize : allocatorMaxSize;
    ProtectNativeStack();
    stackFrameAllocator_->SetReservedMemorySize(iframeStackSize_);
    stackFrameAllocator_->ReserveMemory();
}

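// Protection strategy (best effort): mprotect the protected region directly; if that fails, e.g. because
// the pages are not yet mapped, touch the stack pages via LoadStackPages(), retry mprotect, and finally
// release the touched pages back to the OS.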
void ManagedThread::ProtectNativeStack()
{
    if (nativeStackProtectedSize_ == 0) {
        return;
    }

    // Try to mprotect directly
    if (!ark::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_)) {
        return;
    }

    // If mprotect failed, try to load the stack pages and then retry mprotect
    uintptr_t nativeStackTop = AlignDown(GetStackTop(), ark::os::mem::GetPageSize());
    LOG(DEBUG, RUNTIME) << "ProtectNativeStack: try to load pages, mprotect error = " << strerror(errno)
                        << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                        << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
    if (nativeStackSize_ > STACK_MAX_SIZE_OVERFLOW_CHECK || nativeStackEnd_ >= nativeStackTop ||
        nativeStackTop > nativeStackEnd_ + STACK_MAX_SIZE_OVERFLOW_CHECK) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: stack is too large, mprotect error = " << strerror(errno)
                            << ", max_stack_size = " << STACK_MAX_SIZE_OVERFLOW_CHECK
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
        return;
    }
    LoadStackPages(nativeStackBegin_);
    if (ark::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_)) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: failed to protect pages, error = " << strerror(errno)
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
    }
    size_t releaseSize = nativeStackTop - nativeStackBegin_ - ark::os::mem::GetPageSize();
    if (ark::os::mem::ReleasePages(nativeStackBegin_, nativeStackBegin_ + releaseSize) != 0) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: failed to release pages, error = " << strerror(errno)
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_
                            << ", release_size = " << releaseSize;
    }
}

void ManagedThread::DisableStackOverflowCheck()
{
    nativeStackEnd_ = nativeStackBegin_;
    iframeStackSize_ = std::numeric_limits<size_t>::max();
    if (nativeStackProtectedSize_ > 0) {
        ark::os::mem::MakeMemReadWrite(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_);
    }
}

void ManagedThread::EnableStackOverflowCheck()
{
    nativeStackEnd_ = nativeStackBegin_ + nativeStackProtectedSize_ + nativeStackReservedSize_;
    iframeStackSize_ = nativeStackSize_ * 4U;
    if (nativeStackProtectedSize_ > 0) {
        ark::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_);
    }
}

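// The frame-state stack enforces strict alternation: NativeCodeBegin() rejects two consecutive NATIVE_CODE
// frames, and ManagedCodeBegin() requires a NATIVE_CODE frame on top before pushing MANAGED_CODE.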
void ManagedThread::NativeCodeBegin()
{
    LOG_IF(!(threadFrameStates_.empty() || threadFrameStates_.top() != NATIVE_CODE), FATAL, RUNTIME)
        << LogThreadStack(NATIVE_CODE) << " or stack should be empty";
    threadFrameStates_.push(NATIVE_CODE);
    UpdateStatus(ThreadStatus::NATIVE);
    isManagedScope_ = false;
}

void ManagedThread::NativeCodeEnd()
{
    // thread_frame_states_ should not be accessed without the MutatorLock (as the runtime could have been destroyed).
    // If this was the last frame, it should have been called from Destroy(), which should UpdateStatus to FINISHED
    // after this method
    UpdateStatus(ThreadStatus::RUNNING);
    isManagedScope_ = true;
    LOG_IF(threadFrameStates_.empty(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(threadFrameStates_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(NATIVE_CODE);
    threadFrameStates_.pop();
}

bool ManagedThread::IsInNativeCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    return threadFrameStates_.top() == NATIVE_CODE;
}

void ManagedThread::ManagedCodeBegin()
{
    // thread_frame_states_ should not be accessed without the MutatorLock (as the runtime could have been destroyed)
    UpdateStatus(ThreadStatus::RUNNING);
    isManagedScope_ = true;
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(threadFrameStates_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    threadFrameStates_.push(MANAGED_CODE);
}

void ManagedThread::ManagedCodeEnd()
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(threadFrameStates_.top() != MANAGED_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    threadFrameStates_.pop();
    // Should be NATIVE_CODE
    UpdateStatus(ThreadStatus::NATIVE);
    isManagedScope_ = false;
}

bool ManagedThread::IsManagedCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    return threadFrameStates_.top() == MANAGED_CODE;
}

// Since we don't allow two consecutive NativeCode frames, there is no managed code on the stack if
// its size is 1 and the last frame is Native
bool ManagedThread::HasManagedCodeOnStack() const
{
    if (HasClearStack()) {
        return false;
    }
    if (threadFrameStates_.size() == 1 && IsInNativeCode()) {
        return false;
    }
    return true;
}

bool ManagedThread::HasClearStack() const
{
    return threadFrameStates_.empty();
}

PandaString ManagedThread::ThreadStatusAsString(enum ThreadStatus status)
{
    switch (status) {
        case ThreadStatus::CREATED:
            return "New";
        case ThreadStatus::RUNNING:
            return "Runnable";
        case ThreadStatus::IS_BLOCKED:
            return "Blocked";
        case ThreadStatus::IS_WAITING:
            return "Waiting";
        case ThreadStatus::IS_TIMED_WAITING:
            return "Timed_waiting";
        case ThreadStatus::IS_SUSPENDED:
            return "Suspended";
        case ThreadStatus::IS_COMPILER_WAITING:
            return "Compiler_waiting";
        case ThreadStatus::IS_WAITING_INFLATION:
            return "Waiting_inflation";
        case ThreadStatus::IS_SLEEPING:
            return "Sleeping";
        case ThreadStatus::IS_TERMINATED_LOOP:
            return "Terminated_loop";
        case ThreadStatus::TERMINATING:
            return "Terminating";
        case ThreadStatus::NATIVE:
            return "Native";
        case ThreadStatus::FINISHED:
            return "Terminated";
        default:
            return "unknown";
    }
}

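// Builds a diagnostic message for an invalid frame-state transition, e.g. (with hypothetical values):
//   "threadId: 5 tried to go to MANAGED_CODE state, but last frame is: MANAGED_CODE, 2 frames in stack
//   (from up to bottom): [MANAGED_CODE|NATIVE_CODE]"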
PandaString ManagedThread::LogThreadStack(ThreadState newState) const
{
    PandaStringStream debugMessage;
    static std::unordered_map<ThreadState, std::string> threadStateToStringMap = {
        {ThreadState::NATIVE_CODE, "NATIVE_CODE"}, {ThreadState::MANAGED_CODE, "MANAGED_CODE"}};
    auto newStateIt = threadStateToStringMap.find(newState);
    auto topFrameIt = threadStateToStringMap.find(threadFrameStates_.top());
    ASSERT(newStateIt != threadStateToStringMap.end());
    ASSERT(topFrameIt != threadStateToStringMap.end());

    debugMessage << "threadId: " << GetId() << " "
                 << "tried to go to " << newStateIt->second << " state, but last frame is: " << topFrameIt->second
                 << ", " << threadFrameStates_.size() << " frames in stack (from up to bottom): [";

    PandaStack<ThreadState> copyStack(threadFrameStates_);
    while (!copyStack.empty()) {
        auto it = threadStateToStringMap.find(copyStack.top());
        ASSERT(it != threadStateToStringMap.end());
        debugMessage << it->second;
        if (copyStack.size() > 1) {
            debugMessage << "|";
        }
        copyStack.pop();
    }
    debugMessage << "]";
    return debugMessage.str();
}

MTManagedThread::MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *pandaVm,
                                 ark::panda_file::SourceLang threadLang)
    : ManagedThread(id, allocator, pandaVm, Thread::ThreadType::THREAD_TYPE_MT_MANAGED, threadLang),
      enteringMonitor_(nullptr)
{
    ASSERT(pandaVm != nullptr);
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    internalId_ = threadManager->GetInternalThreadId();

    auto ext = Runtime::GetCurrent()->GetClassLinker()->GetExtension(GetThreadLang());
    if (ext != nullptr) {
        stringClassPtr_ = ext->GetClassRoot(ClassRoot::STRING);
    }

    auto *rs = allocator->New<mem::ReferenceStorage>(pandaVm->GetGlobalObjectStorage(), allocator, false);
    LOG_IF((rs == nullptr || !rs->Init()), FATAL, RUNTIME) << "Cannot create pt reference storage";
    ptReferenceStorage_ = PandaUniquePtr<mem::ReferenceStorage>(rs);
}

MTManagedThread::~MTManagedThread()
{
    ASSERT(internalId_ != 0);
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->RemoveInternalThreadId(internalId_);
}

void ManagedThread::PushLocalObject(ObjectHeader **objectHeader)
{
    ASSERT(TestLockState());
    localObjects_.push_back(objectHeader);
    LOG(DEBUG, GC) << "PushLocalObject for thread " << std::hex << this << ", obj = " << *objectHeader;
}

void ManagedThread::PopLocalObject()
{
    ASSERT(TestLockState());
    ASSERT(!localObjects_.empty());
    LOG(DEBUG, GC) << "PopLocalObject from thread " << std::hex << this << ", obj = " << *localObjects_.back();
    localObjects_.pop_back();
}

bool ManagedThread::TestLockState() const
{
#ifndef NDEBUG
    // Object handles can be created during class initialization, so check lock state only after GC is started.
    return !ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (GetMutatorLock()->GetState() != MutatorLock::MutatorLockState::UNLOCKED);
#else
    return true;
#endif
}

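// Objects locked by this thread are tracked together with the frame that locked them; the list feeds
// GetLockedObjectInfos() and is walked by the GC when locked objects move (see
// MTManagedThread::UpdateGCRoots).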
void MTManagedThread::PushLocalObjectLocked(ObjectHeader *obj)
{
    localObjectsLocked_.EmplaceBack(obj, GetFrame());
}

void MTManagedThread::PopLocalObjectLocked([[maybe_unused]] ObjectHeader *out)
{
    if (LIKELY(!localObjectsLocked_.Empty())) {
#ifndef NDEBUG
        ObjectHeader *obj = localObjectsLocked_.Back().GetObject();
        if (obj != out) {
            LOG(WARNING, RUNTIME) << "Locked object is not paired";
        }
#endif // !NDEBUG
        localObjectsLocked_.PopBack();
    } else {
        LOG(WARNING, RUNTIME) << "PopLocalObjectLocked failed: the current thread's locked-object list is empty";
    }
}

Span<LockedObjectInfo> MTManagedThread::GetLockedObjectInfos()
{
    return localObjectsLocked_.Data();
}

void ManagedThread::UpdateTLAB(mem::TLAB *tlab)
{
    ASSERT(tlab_ != nullptr);
    ASSERT(tlab != nullptr);
    tlab_ = tlab;
}

void ManagedThread::ClearTLAB()
{
    ASSERT(zeroTlab_ != nullptr);
    tlab_ = zeroTlab_;
}

/* Common actions for creation of the thread. */
void MTManagedThread::ProcessCreatedThread()
{
    ManagedThread::SetCurrent(this);
    // Runtime takes ownership of the thread
    trace::ScopedTrace scopedTrace2("ThreadManager::RegisterThread");
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->RegisterThread(this);
    NativeCodeBegin();
}

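// Called by a moving GC after relocation: every root slot owned by the thread (the pending exception, raw
// local object slots, and handle storages) is handed to gcRootUpdater so stale pointers can be rewritten.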
void ManagedThread::UpdateGCRoots(const GCRootUpdater &gcRootUpdater)
{
    if (exception_ != nullptr) {
        gcRootUpdater(&exception_);
    }
    for (auto **localObject : localObjects_) {
        gcRootUpdater(localObject);
    }

    if (!taggedHandleScopes_.empty()) {
        taggedHandleStorage_->UpdateHeapObject(gcRootUpdater);
        taggedGlobalHandleStorage_->UpdateHeapObject(gcRootUpdater);
    }

    if (!objectHeaderHandleScopes_.empty()) {
        objectHeaderHandleStorage_->UpdateHeapObject(gcRootUpdater);
    }
}

/* Returns true if the sleep was interrupted */
bool MTManagedThread::Sleep(uint64_t ms)
{
    auto thread = MTManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    bool isInterrupted = thread->IsInterrupted();
    if (!isInterrupted) {
        thread->TimedWait(ThreadStatus::IS_SLEEPING, ms, 0);
        isInterrupted = thread->IsInterrupted();
    }
    return isInterrupted;
}

void ManagedThread::SetThreadPriority(int32_t prio)
{
    ThreadId tid = GetId();
    int res = os::thread::SetPriority(tid, prio);
    if (!os::thread::IsSetPriorityError(res)) {
        LOG(DEBUG, RUNTIME) << "Successfully changed priority for thread " << tid << " to " << prio;
    } else {
        LOG(DEBUG, RUNTIME) << "Cannot change priority for thread " << tid << " to " << prio;
    }
}

uint32_t ManagedThread::GetThreadPriority()
{
    ThreadId tid = GetId();
    return os::thread::GetPriority(tid);
}

void MTManagedThread::UpdateGCRoots(const GCRootUpdater &gcRootUpdater)
{
    ManagedThread::UpdateGCRoots(gcRootUpdater);
    for (auto &it : localObjectsLocked_.Data()) {
        it.UpdateObject(gcRootUpdater);
    }

    // Update enter_monitor_object_
    if (enterMonitorObject_ != nullptr) {
        gcRootUpdater(&enterMonitorObject_);
    }

    ptReferenceStorage_->UpdateMovedRefs(gcRootUpdater);
}

void MTManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    ManagedThread::VisitGCRoots(cb);

    // Visit enter_monitor_object_
    if (enterMonitorObject_ != nullptr) {
        cb(enterMonitorObject_);
    }

    ptReferenceStorage_->VisitObjects([&cb](const mem::GCRoot &gcRoot) { cb(gcRoot.GetObjectHeader()); },
                                      mem::RootType::ROOT_PT_LOCAL);
}

void MTManagedThread::SetDaemon()
{
    isDaemon_ = true;
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->AddDaemonThread();
    SetThreadPriority(MIN_PRIORITY);
}

void MTManagedThread::Interrupt(MTManagedThread *thread)
{
    os::memory::LockHolder lock(thread->condLock_);
    LOG(DEBUG, RUNTIME) << "Interrupt thread " << thread->GetId();
    thread->SetInterruptedWithLockHeld(true);
    thread->SignalWithLockHeld();
    thread->InterruptPostImpl();
}

bool MTManagedThread::Interrupted()
{
    os::memory::LockHolder lock(condLock_);
    bool res = IsInterruptedWithLockHeld();
    SetInterruptedWithLockHeld(false);
    return res;
}

void MTManagedThread::StopDaemonThread()
{
    SetRuntimeTerminated();
    MTManagedThread::Interrupt(this);
}

void ManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    if (exception_ != nullptr) {
        cb(exception_);
    }
    for (auto it : localObjects_) {
        cb(*it);
    }

    if (!taggedHandleScopes_.empty()) {
        taggedHandleStorage_->VisitGCRoots(cb);
        taggedGlobalHandleStorage_->VisitGCRoots(cb);
    }
    if (!objectHeaderHandleScopes_.empty()) {
        objectHeaderHandleStorage_->VisitGCRoots(cb);
    }
}

void MTManagedThread::Destroy()
{
    ASSERT(this == ManagedThread::GetCurrent());
    ASSERT(GetStatus() != ThreadStatus::FINISHED);

    // Set this status to prevent the runtime from destroying itself while this NATIVE thread
    // is still trying to access it.
    UpdateStatus(ThreadStatus::TERMINATING);
    ReleaseMonitors();
    if (!IsDaemon()) {
        Runtime *runtime = Runtime::GetCurrent();
        runtime->GetNotificationManager()->ThreadEndEvent(this);
    }

    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    if (threadManager->UnregisterExitedThread(this)) {
        // Clear current_thread only if unregistration was successful
        ManagedThread::SetCurrent(nullptr);
    }
}

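// Note: customTlsCache_ is keyed by raw const char * pointers, so lookups appear to compare pointer
// identity rather than string contents; callers should pass the same key pointer they stored with.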
CustomTLSData *ManagedThread::GetCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    auto it = customTlsCache_.find(key);
    if (it == customTlsCache_.end()) {
        return nullptr;
    }
    return it->second.get();
}

void ManagedThread::SetCustomTLSData(const char *key, CustomTLSData *data)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    PandaUniquePtr<CustomTLSData> tlsData(data);
    auto it = customTlsCache_.find(key);
    if (it == customTlsCache_.end()) {
        customTlsCache_[key] = {PandaUniquePtr<CustomTLSData>()};
    }
    customTlsCache_[key].swap(tlsData);
}

bool ManagedThread::EraseCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    return customTlsCache_.erase(key) != 0;
}

LanguageContext ManagedThread::GetLanguageContext()
{
    return Runtime::GetCurrent()->GetLanguageContext(threadLang_);
}

void MTManagedThread::FreeInternalMemory()
{
    localObjectsLocked_.~LockedObjectList<>();
    ptReferenceStorage_.reset();

    ManagedThread::FreeInternalMemory();
}

void ManagedThread::CollectTLABMetrics()
{
    if (zeroTlab_ != nullptr) {
        GetVM()->GetHeapManager()->RegisterTLAB(GetTLAB());
    }
}

void ManagedThread::DestroyInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this, mem::BuffersKeepingFlag::DELETE);
    ASSERT(preBuff_ == nullptr);
    ASSERT(g1PostBarrierRingBuffer_ == nullptr);
    ptThreadInfo_->Destroy();
}

void ManagedThread::CleanupInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this, mem::BuffersKeepingFlag::KEEP);
}

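// FreeInternalMemory releases allocator-backed members and runs container destructors explicitly instead of
// relying on ~ManagedThread, presumably so the memory is returned while the runtime's internal allocator
// still exists.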
void ManagedThread::FreeInternalMemory()
{
    threadFrameStates_.~PandaStack<ThreadState>();
    DestroyInternalResources();

    localObjects_.~PandaVector<ObjectHeader **>();
    {
        os::memory::LockHolder lock(*Locks::customTlsLock_);
        customTlsCache_.~PandaMap<const char *, PandaUniquePtr<CustomTLSData>>();
    }

    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(stackFrameAllocator_);
    allocator->Delete(internalLocalAllocator_);

    allocator->Delete(ptThreadInfo_.release());
    allocator->Delete(weightedAdaptiveTlabAverage_);

    taggedHandleScopes_.~PandaVector<HandleScope<coretypes::TaggedType> *>();
    allocator->Delete(taggedHandleStorage_);
    allocator->Delete(taggedGlobalHandleStorage_);

    allocator->Delete(objectHeaderHandleStorage_);
    objectHeaderHandleScopes_.~PandaVector<HandleScope<ObjectHeader *> *>();

    Thread::FreeInternalMemory();
}

void ManagedThread::PrintSuspensionStackIfNeeded()
{
    /* @sync 1
     * @description Before getting runtime options
     */
    if (!Runtime::GetOptions().IsSafepointBacktrace()) {
        /* @sync 2
         * @description After getting runtime options
         */
        return;
    }
    /* @sync 3
     * @description After getting runtime options
     */
    PandaStringStream out;
    out << "Thread " << GetId() << " is suspended at\n";
    PrintStack(out);
    LOG(INFO, RUNTIME) << out.str();
}

void ManagedThread::CleanUp()
{
    // Clean up the exception, TLAB, interpreter cache, and handle storages
    ClearException();
    ClearTLAB();

    while (!threadFrameStates_.empty()) {
        threadFrameStates_.pop();
    }
    localObjects_.clear();
    {
        os::memory::LockHolder lock(*Locks::customTlsLock_);
        customTlsCache_.clear();
    }
    interpreterCache_.Clear();

    taggedHandleScopes_.clear();
    taggedHandleStorage_->FreeHandles(0);
    taggedGlobalHandleStorage_->FreeHandles();

    objectHeaderHandleStorage_->FreeHandles(0);
    objectHeaderHandleScopes_.clear();

    CleanUpThreadStatus();
    // NOTE(molotkovnikhail, 13159) Add cleanup of signal_stack for windows target
}

} // namespace ark