1 /*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/js_thread.h"
17
18 #include "ecmascript/runtime.h"
19 #include "ecmascript/debugger/js_debugger_manager.h"
20 #include "ecmascript/js_date.h"
21 #include "ecmascript/js_object-inl.h"
22 #include "ecmascript/js_tagged_value.h"
23 #include "ecmascript/runtime_call_id.h"
24
25 #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
26 #include <sys/resource.h>
27 #endif
28
29 #if defined(ENABLE_EXCEPTION_BACKTRACE)
30 #include "ecmascript/platform/backtrace.h"
31 #endif
32 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
33 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
34 #endif
35 #include "ecmascript/dfx/vm_thread_control.h"
36 #include "ecmascript/ecma_global_storage.h"
37 #include "ecmascript/ic/properties_cache.h"
38 #include "ecmascript/interpreter/interpreter.h"
39 #include "ecmascript/mem/concurrent_marker.h"
40 #include "ecmascript/platform/file.h"
41 #include "ecmascript/jit/jit.h"
42
43 namespace panda::ecmascript {
44 using CommonStubCSigns = panda::ecmascript::kungfu::CommonStubCSigns;
45 using BytecodeStubCSigns = panda::ecmascript::kungfu::BytecodeStubCSigns;
46
47 thread_local JSThread *currentThread = nullptr;
48
// Returns the JSThread bound to the calling OS thread (set by
// RegisterThread), or nullptr if no JSThread is bound to this thread.
JSThread *JSThread::GetCurrent()
{
    return currentThread;
}
53
54 // static
// Registers jsThread with the global Runtime thread list and, when the
// calling OS thread has no JSThread bound yet, binds it to the thread-local
// currentThread and moves it to the NATIVE state.
void JSThread::RegisterThread(JSThread *jsThread)
{
    Runtime::GetInstance()->RegisterThread(jsThread);
    // If currentThread is already set, this instance was created on behalf of
    // a future fork and is intentionally left unbound to the calling thread.
    if (currentThread == nullptr) {
        currentThread = jsThread;
        jsThread->UpdateState(ThreadState::NATIVE);
    }
}
64
// Unbinds jsThread from the calling OS thread (if bound), moves it to the
// TERMINATED state, and removes it from the global Runtime thread list.
void JSThread::UnregisterThread(JSThread *jsThread)
{
    if (currentThread == jsThread) {
        jsThread->UpdateState(ThreadState::TERMINATED);
        currentThread = nullptr;
    } else {
        // We have created this JSThread instance but hadn't forked it, so it
        // was never bound to an OS thread and must still be in CREATED state.
        ASSERT(jsThread->GetState() == ThreadState::CREATED);
        jsThread->UpdateState(ThreadState::TERMINATED);
    }
    Runtime::GetInstance()->UnregisterThread(jsThread);
}
77
78 // static
// Allocates and initializes a JSThread for vm: configures the interpreter
// mode, allocates the interpreter frame stack, records native stack limits,
// copies element-kind flags from the VM options, and registers the thread
// with the Runtime. Returns an owning pointer (released with delete).
JSThread *JSThread::Create(EcmaVM *vm)
{
    auto jsThread = new JSThread(vm);

    AsmInterParsedOption asmInterOpt = vm->GetJSOptions().GetAsmInterParsedOption();
    if (asmInterOpt.enableAsm) {
        jsThread->EnableAsmInterpreter();
    }

    jsThread->nativeAreaAllocator_ = vm->GetNativeAreaAllocator();
    jsThread->heapRegionAllocator_ = vm->GetHeapRegionAllocator();
    // align with 16
    size_t maxStackSize = vm->GetEcmaParamConfiguration().GetMaxStackSize();
    jsThread->glueData_.frameBase_ = static_cast<JSTaggedType *>(
        vm->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize));
    // currentFrame_ starts at the high end; interpreter frames grow downwards.
    jsThread->glueData_.currentFrame_ = jsThread->glueData_.frameBase_ + maxStackSize;
    EcmaInterpreter::InitStackFrame(jsThread);

    jsThread->glueData_.stackLimit_ = GetAsmStackLimit();
    jsThread->glueData_.stackStart_ = GetCurrentStackPosition();
    jsThread->glueData_.isEnableMutantArray_ = vm->IsEnableMutantArray();
    jsThread->glueData_.IsEnableElementsKind_ = vm->IsEnableElementsKind();
    jsThread->SetThreadId();

    RegisterThread(jsThread);
    return jsThread;
}
106
// Constructs the JSThread bound to vm and wires up the global-handle
// operations. Depending on EnableGlobalLeakCheck(), the handle callbacks
// (new/dispose/setWeak/clearWeak/isWeak) are routed either to the plain
// EcmaGlobalStorage<Node> or to EcmaGlobalStorage<DebugNode>, which
// additionally tracks data used by IterateHandleWithCheck for leak checks.
JSThread::JSThread(EcmaVM *vm) : id_(os::thread::GetCurrentThreadId()), vm_(vm)
{
    auto chunk = vm->GetChunk();
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_ = chunk->New<EcmaGlobalStorage<Node>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                          WeakClearCallback nativeFinalizeCallBack) {
            return globalStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalStorage_->IsWeak(addr); };
    } else {
        globalDebugStorage_ = chunk->New<EcmaGlobalStorage<DebugNode>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalDebugStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalDebugStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                          WeakClearCallback nativeFinalizeCallBack) {
            return globalDebugStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalDebugStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalDebugStorage_->IsWeak(addr); };
    }
    vmThreadControl_ = new VmThreadControl(this);
    SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
    dateUtils_ = new DateUtils();
}
135
JSThread(EcmaVM * vm,ThreadType threadType)136 JSThread::JSThread(EcmaVM *vm, ThreadType threadType) : id_(os::thread::GetCurrentThreadId()),
137 vm_(vm), threadType_(threadType)
138 {
139 ASSERT(threadType == ThreadType::JIT_THREAD);
140 // jit thread no need GCIterating
141 readyForGCIterating_ = false;
142 RegisterThread(this);
143 };
144
// Constructor for the daemon thread (no owning EcmaVM). Daemon threads never
// contribute GC roots, so readyForGCIterating_ is disabled. Registration is
// handled elsewhere; see the destructor's daemon-thread note.
JSThread::JSThread(ThreadType threadType) : threadType_(threadType)
{
    ASSERT(threadType == ThreadType::DAEMON_THREAD);
    // daemon thread no need GCIterating
    readyForGCIterating_ = false;
}
151
// Tears down the thread: releases whichever global storage was created,
// deletes all contexts, frees the interpreter frame stack (JS threads only),
// releases the regexp cache area and VM thread control, and finally
// unregisters the thread from the Runtime.
JSThread::~JSThread()
{
    readyForGCIterating_ = false;
    if (globalStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalStorage_);
        globalStorage_ = nullptr;
    }
    if (globalDebugStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalDebugStorage_);
        globalDebugStorage_ = nullptr;
    }

    for (auto item : contexts_) {
        delete item;
    }
    contexts_.clear();
    // Only JS threads allocate frameBase_ in Create(); JIT/daemon threads
    // must not free it.
    if (threadType_ == ThreadType::JS_THREAD) {
        GetNativeAreaAllocator()->Free(glueData_.frameBase_, sizeof(JSTaggedType) *
                                       vm_->GetEcmaParamConfiguration().GetMaxStackSize());
    }
    GetNativeAreaAllocator()->FreeArea(regExpCache_);

    glueData_.frameBase_ = nullptr;
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;
    regExpCache_ = nullptr;
    if (vmThreadControl_ != nullptr) {
        delete vmThreadControl_;
        vmThreadControl_ = nullptr;
    }
    // DaemonThread will be unregistered when the binding std::thread release.
    if (!IsDaemonThread()) {
        UnregisterThread(this);
    }
    if (dateUtils_ != nullptr) {
        delete dateUtils_;
        dateUtils_ = nullptr;
    }
}
191
// Returns the id of the current thread or task (delegates to the
// thread-or-task helper).
ThreadId JSThread::GetCurrentThreadId()
{
    return GetCurrentThreadOrTaskId();
}
196
// Records the pending exception in glue data. When exception backtraces are
// enabled, also logs the raw exception value and a native backtrace.
void JSThread::SetException(JSTaggedValue exception)
{
    glueData_.exception_ = exception;
#if defined(ENABLE_EXCEPTION_BACKTRACE)
    if (vm_->GetJSOptions().EnableExceptionBacktrace()) {
        LOG_ECMA(INFO) << "SetException:" << exception.GetRawData();
        std::ostringstream stack;
        Backtrace(stack);
        LOG_ECMA(INFO) << stack.str();
    }
#endif
}
209
// Clears the pending exception. Hole is the "no exception" sentinel
// (Iterate() skips the exception slot when it holds a Hole).
void JSThread::ClearException()
{
    glueData_.exception_ = JSTaggedValue::Hole();
}
214
// Returns the lexical environment of the topmost interpreted frame.
JSTaggedValue JSThread::GetCurrentLexenv() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetEnv();
}
220
// Returns the function object of the topmost interpreted frame.
JSTaggedValue JSThread::GetCurrentFunction() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetFunction();
}
226
GetCurrentFrame() const227 const JSTaggedType *JSThread::GetCurrentFrame() const
228 {
229 if (IsAsmInterpreter()) {
230 return GetLastLeaveFrame();
231 }
232 return GetCurrentSPFrame();
233 }
234
SetCurrentFrame(JSTaggedType * sp)235 void JSThread::SetCurrentFrame(JSTaggedType *sp)
236 {
237 if (IsAsmInterpreter()) {
238 return SetLastLeaveFrame(sp);
239 }
240 return SetCurrentSPFrame(sp);
241 }
242
// Returns the sp of the topmost interpreted frame. On the ASM interpreter a
// FrameHandler is needed to locate it; otherwise the SP frame is direct.
const JSTaggedType *JSThread::GetCurrentInterpretedFrame() const
{
    if (IsAsmInterpreter()) {
        auto frameHandler = FrameHandler(this);
        return frameHandler.GetSp();
    }
    return GetCurrentSPFrame();
}
251
InvokeWeakNodeFreeGlobalCallBack()252 void JSThread::InvokeWeakNodeFreeGlobalCallBack()
253 {
254 while (!weakNodeFreeGlobalCallbacks_.empty()) {
255 auto callbackPair = weakNodeFreeGlobalCallbacks_.back();
256 weakNodeFreeGlobalCallbacks_.pop_back();
257 ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
258 auto callback = callbackPair.first;
259 (*callback)(callbackPair.second);
260 }
261 }
262
// Runs all queued native finalize callbacks for weak nodes whose referents
// died in the last GC, then the optional finalize-task callback.
void JSThread::InvokeWeakNodeNativeFinalizeCallback()
{
    // A callback may itself trigger another GC and re-enter this function;
    // the flag makes the nested invocation a no-op.
    if (runningNativeFinalizeCallbacks_) {
        return;
    }
    runningNativeFinalizeCallbacks_ = true;
    TRACE_GC(GCStats::Scope::ScopeId::InvokeNativeFinalizeCallbacks, GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "InvokeNativeFinalizeCallbacks num:"
        + std::to_string(weakNodeNativeFinalizeCallbacks_.size()));
    // Consume in LIFO order; callbacks queued during execution are also run.
    while (!weakNodeNativeFinalizeCallbacks_.empty()) {
        auto callbackPair = weakNodeNativeFinalizeCallbacks_.back();
        weakNodeNativeFinalizeCallbacks_.pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        (*callback)(callbackPair.second);
    }
    if (finalizeTaskCallback_ != nullptr) {
        finalizeTaskCallback_();
    }
    runningNativeFinalizeCallbacks_ = false;
}
285
// Returns whether the global leak check has been started (VM option).
bool JSThread::IsStartGlobalLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().IsStartGlobalLeakCheck();
}
290
// Returns whether leak checking of global object handles is enabled.
bool JSThread::EnableGlobalObjectLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalObjectLeakCheck();
}
295
// Returns whether leak checking of global primitive handles is enabled.
bool JSThread::EnableGlobalPrimitiveLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalPrimitiveLeakCheck();
}
300
// Returns true when the thread is in the RUNNING state, or when a heap or
// CPU profiling session (if compiled in) is active.
bool JSThread::IsInRunningStateOrProfiling() const
{
    bool result = IsInRunningState();
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    result |= vm_->GetHeapProfile() != nullptr;
#endif
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    result |= GetIsProfiling();
#endif
    return result;
}
312
// Flushes buffer (plus a trailing newline) to the configured stack trace fd
// and clears the buffer. No-op when no fd has been set.
void JSThread::WriteToStackTraceFd(std::ostringstream &buffer) const
{
    if (stackTraceFd_ < 0) {
        return;
    }
    buffer << std::endl;
    DPrintf(reinterpret_cast<fd_t>(stackTraceFd_), buffer.str());
    // Reset the buffer so the caller can reuse it for the next record.
    buffer.str("");
}
322
// Sets the file descriptor used by WriteToStackTraceFd for leak-check output.
void JSThread::SetStackTraceFd(int32_t fd)
{
    stackTraceFd_ = fd;
}
327
// Syncs and closes the stack trace fd, resetting it to the -1 sentinel.
// Idempotent: a second call is a no-op.
void JSThread::CloseStackTraceFd()
{
    if (stackTraceFd_ != -1) {
        FSync(reinterpret_cast<fd_t>(stackTraceFd_));
        Close(reinterpret_cast<fd_t>(stackTraceFd_));
        stackTraceFd_ = -1;
    }
}
336
SetJitCodeMap(JSTaggedType exception,MachineCode * machineCode,std::string & methodName,uintptr_t offset)337 void JSThread::SetJitCodeMap(JSTaggedType exception, MachineCode* machineCode, std::string &methodName,
338 uintptr_t offset)
339 {
340 auto it = jitCodeMaps_.find(exception);
341 if (it != jitCodeMaps_.end()) {
342 it->second->push_back(std::make_tuple(machineCode, methodName, offset));
343 } else {
344 JitCodeVector *jitCode = new JitCodeVector {std::make_tuple(machineCode, methodName, offset)};
345 jitCodeMaps_.emplace(exception, jitCode);
346 }
347 }
348
Iterate(RootVisitor & visitor)349 void JSThread::Iterate(RootVisitor &visitor)
350 {
351 if (!glueData_.exception_.IsHole()) {
352 visitor.VisitRoot(Root::ROOT_VM, ObjectSlot(ToUintPtr(&glueData_.exception_)));
353 }
354 visitor.VisitRangeRoot(Root::ROOT_VM,
355 ObjectSlot(glueData_.builtinEntries_.Begin()), ObjectSlot(glueData_.builtinEntries_.End()));
356
357 // visit stack roots
358 FrameHandler frameHandler(this);
359 frameHandler.Iterate(visitor);
360 for (EcmaContext *context : contexts_) {
361 context->Iterate(visitor);
362 }
363 // visit tagged handle storage roots
364 if (vm_->GetJSOptions().EnableGlobalLeakCheck()) {
365 IterateHandleWithCheck(visitor);
366 } else {
367 size_t globalCount = 0;
368 globalStorage_->IterateUsageGlobal([&visitor, &globalCount](Node *node) {
369 JSTaggedValue value(node->GetObject());
370 if (value.IsHeapObject()) {
371 visitor.VisitRoot(Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
372 }
373 globalCount++;
374 });
375 static bool hasCheckedGlobalCount = false;
376 static const size_t WARN_GLOBAL_COUNT = 100000;
377 if (!hasCheckedGlobalCount && globalCount >= WARN_GLOBAL_COUNT) {
378 LOG_ECMA(WARN) << "Global reference count is " << globalCount << ",It exceed the upper limit 100000!";
379 hasCheckedGlobalCount = true;
380 }
381 }
382 }
// Hands the whole jit code map to the supplied visitor.
void JSThread::IterateJitCodeMap(const JitCodeMapVisitor &jitCodeMapVisitor)
{
    jitCodeMapVisitor(jitCodeMaps_);
}
387
// Leak-check variant of global handle iteration: visits every global node as
// a GC root while collecting per-JSType counts, a primitive count, and (when
// a leak check session is stopping and a trace fd is open) a report of nodes
// that survived multiple GCs, written via WriteToStackTraceFd.
void JSThread::IterateHandleWithCheck(RootVisitor &visitor)
{
    size_t handleCount = 0;
    for (EcmaContext *context : contexts_) {
        handleCount += context->IterateHandle(visitor);
    }

    size_t globalCount = 0;
    static const int JS_TYPE_SUM = static_cast<int>(JSType::TYPE_LAST) + 1;
    int typeCount[JS_TYPE_SUM] = { 0 };
    int primitiveCount = 0;
    // "Stop" phase: leak check was enabled, has been switched off again, and
    // a trace fd is available — this is when the report is emitted.
    bool isStopObjectLeakCheck = EnableGlobalObjectLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    bool isStopPrimitiveLeakCheck = EnableGlobalPrimitiveLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    std::ostringstream buffer;
    globalDebugStorage_->IterateUsageGlobal([this, &visitor, &globalCount, &typeCount, &primitiveCount,
                                             isStopObjectLeakCheck, isStopPrimitiveLeakCheck, &buffer](DebugNode *node) {
        node->MarkCount();
        JSTaggedValue value(node->GetObject());
        if (value.IsHeapObject()) {
            visitor.VisitRoot(Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            TaggedObject *object = value.GetTaggedObject();
            // Follow the forwarding pointer if the object was moved by GC.
            MarkWord word(value.GetTaggedObject());
            if (word.IsForwardingAddress()) {
                object = word.ToForwardingAddress();
            }
            typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;

            // Print global information about possible memory leaks.
            // You can print the global new stack within the range of the leaked global number.
            if (isStopObjectLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak object address:" << std::hex << object <<
                    ", type:" << JSHClass::DumpJSType(JSType(object->GetClass()->GetObjectType())) <<
                    ", node address:" << node << ", number:" << std::dec << node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        } else {
            primitiveCount++;
            if (isStopPrimitiveLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak primitive:" << std::hex << value.GetRawData() <<
                    ", node address:" << node << ", number:" << std::dec << node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        }
        globalCount++;
    });

    if (isStopObjectLeakCheck || isStopPrimitiveLeakCheck) {
        buffer << "Global leak check success!";
        WriteToStackTraceFd(buffer);
        CloseStackTraceFd();
    }
    // Determine whether memory leakage by checking handle and global count.
    LOG_ECMA(INFO) << "Iterate root handle count:" << handleCount << ", global handle count:" << globalCount;
    OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type Primitive count:" << primitiveCount;
    // Print global object type statistic.
    static const int MIN_COUNT_THRESHOLD = 50;
    for (int i = 0; i < JS_TYPE_SUM; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type " << JSHClass::DumpJSType(JSType(i))
                                            << " count:" << typeCount[i];
        }
    }
}
453
IterateWeakEcmaGlobalStorage(const WeakRootVisitor & visitor,GCKind gcKind)454 void JSThread::IterateWeakEcmaGlobalStorage(const WeakRootVisitor &visitor, GCKind gcKind)
455 {
456 auto callBack = [this, visitor, gcKind](WeakNode *node) {
457 JSTaggedValue value(node->GetObject());
458 if (!value.IsHeapObject()) {
459 return;
460 }
461 auto object = value.GetTaggedObject();
462 auto fwd = visitor(object);
463 if (fwd == nullptr) {
464 // undefind
465 node->SetObject(JSTaggedValue::Undefined().GetRawData());
466 auto nativeFinalizeCallback = node->GetNativeFinalizeCallback();
467 if (nativeFinalizeCallback) {
468 weakNodeNativeFinalizeCallbacks_.push_back(std::make_pair(nativeFinalizeCallback,
469 node->GetReference()));
470 }
471 auto freeGlobalCallBack = node->GetFreeGlobalCallback();
472 if (!freeGlobalCallBack) {
473 // If no callback, dispose global immediately
474 DisposeGlobalHandle(ToUintPtr(node));
475 } else if (gcKind == GCKind::SHARED_GC) {
476 // For shared GC, free global should defer execute in its own thread
477 weakNodeFreeGlobalCallbacks_.push_back(std::make_pair(freeGlobalCallBack, node->GetReference()));
478 } else {
479 node->CallFreeGlobalCallback();
480 }
481 } else if (fwd != object) {
482 // update
483 node->SetObject(JSTaggedValue(fwd).GetRawData());
484 }
485 };
486 if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
487 globalStorage_->IterateWeakUsageGlobal(callBack);
488 } else {
489 globalDebugStorage_->IterateWeakUsageGlobal(callBack);
490 }
491 }
492
// Updates the weakly-held exception keys of jitCodeMaps_ after GC: entries
// whose key object died are removed (their JitCodeVector deleted); entries
// whose key object moved are re-inserted under the forwarding address.
// NOTE(review): this emplaces into jitCodeMaps_ while iterating it — safe
// for node-based std::map (iterators stay valid on insert), but unsafe for
// std::unordered_map if insertion triggers a rehash; confirm the container
// type declared in the header.
void JSThread::UpdateJitCodeMapReference(const WeakRootVisitor &visitor)
{
    auto it = jitCodeMaps_.begin();
    while (it != jitCodeMaps_.end()) {
        auto obj = reinterpret_cast<TaggedObject *>(it->first);
        auto fwd = visitor(obj);
        if (fwd == nullptr) {
            // Key object died: release the owned jit code vector.
            delete it->second;
            it = jitCodeMaps_.erase(it);
        } else if (fwd != obj) {
            // Key object moved: rehome the entry under the new raw value.
            jitCodeMaps_.emplace(JSTaggedValue(fwd).GetRawData(), it->second);
            it = jitCodeMaps_.erase(it);
        } else {
            ++it;
        }
    }
}
510
// Checks the interpreter frame stack for overflow: returns true (and raises
// a RangeError if no exception is already pending) when sp has descended
// into the reserved region above frameBase_. Skipped entirely when
// cross-thread execution is enabled.
bool JSThread::DoStackOverflowCheck(const JSTaggedType *sp)
{
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && sp <= glueData_.frameBase_ + RESERVE_STACK_SIZE)) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! Remaining stack size is: " << (sp - glueData_.frameBase_);
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            // StackCheck::NO: avoid re-triggering the stack check while
            // constructing the error object itself.
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}
527
// Checks the native C++ stack against the ASM stack limit: returns true (and
// raises a RangeError if none is pending) when the current stack position
// has dropped below the limit. Skipped when cross-thread execution is on.
bool JSThread::DoStackLimitCheck()
{
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && GetCurrentStackPosition() < GetStackLimit())) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! current:" << GetCurrentStackPosition() << " limit:" << GetStackLimit();
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            // StackCheck::NO: avoid recursing into this check while building
            // the error object.
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}
543
// Grows the handle storage of the current context; returns the new slot.
uintptr_t *JSThread::ExpandHandleStorage()
{
    return GetCurrentEcmaContext()->ExpandHandleStorage();
}
548
// Shrinks the current context's handle storage back to prevIndex.
void JSThread::ShrinkHandleStorage(int prevIndex)
{
    GetCurrentEcmaContext()->ShrinkHandleStorage(prevIndex);
}
553
NotifyArrayPrototypeChangedGuardians(JSHandle<JSObject> receiver)554 void JSThread::NotifyArrayPrototypeChangedGuardians(JSHandle<JSObject> receiver)
555 {
556 if (!glueData_.arrayPrototypeChangedGuardians_) {
557 return;
558 }
559 if (!receiver->GetJSHClass()->IsPrototype() && !receiver->IsJSArray()) {
560 return;
561 }
562 auto env = GetEcmaVM()->GetGlobalEnv();
563 if (receiver.GetTaggedValue() == env->GetObjectFunctionPrototype().GetTaggedValue() ||
564 receiver.GetTaggedValue() == env->GetArrayPrototype().GetTaggedValue()) {
565 glueData_.arrayPrototypeChangedGuardians_ = false;
566 return;
567 }
568 }
569
// Re-arms the array-prototype-changed guardian.
void JSThread::ResetGuardians()
{
    glueData_.arrayPrototypeChangedGuardians_ = true;
}
574
// Records the full set of initial hidden classes for one builtin type in the
// glue-data entry table (builtin, instance, prototype, prototype-of-prototype
// and the optional extra hclass).
void JSThread::SetInitialBuiltinHClass(
    BuiltinTypeId type, JSHClass *builtinHClass, JSHClass *instanceHClass,
    JSHClass *prototypeHClass, JSHClass *prototypeOfPrototypeHClass, JSHClass *extraHClass)
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    auto &entry = glueData_.builtinHClassEntries_.entries[index];
    LOG_ECMA(DEBUG) << "JSThread::SetInitialBuiltinHClass: "
                    << "Builtin = " << ToString(type)
                    << ", builtinHClass = " << builtinHClass
                    << ", instanceHClass = " << instanceHClass
                    << ", prototypeHClass = " << prototypeHClass
                    << ", prototypeOfPrototypeHClass = " << prototypeOfPrototypeHClass
                    << ", extraHClass = " << extraHClass;
    entry.builtinHClass = builtinHClass;
    entry.instanceHClass = instanceHClass;
    entry.prototypeHClass = prototypeHClass;
    entry.prototypeOfPrototypeHClass = prototypeOfPrototypeHClass;
    entry.extraHClass = extraHClass;
}
594
SetInitialBuiltinGlobalHClass(JSHClass * builtinHClass,GlobalIndex globalIndex)595 void JSThread::SetInitialBuiltinGlobalHClass(
596 JSHClass *builtinHClass, GlobalIndex globalIndex)
597 {
598 auto &map = ctorHclassEntries_;
599 map[builtinHClass] = globalIndex;
600 }
601
// Returns the builtin (constructor) hclass recorded for the given type.
JSHClass *JSThread::GetBuiltinHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].builtinHClass;
}
607
// Returns the instance hclass recorded for the given builtin type.
JSHClass *JSThread::GetBuiltinInstanceHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].instanceHClass;
}
613
// Returns the extra hclass recorded for the given builtin type.
JSHClass *JSThread::GetBuiltinExtraHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].extraHClass;
}
619
// Returns the prototype hclass recorded for the given builtin type.
JSHClass *JSThread::GetBuiltinPrototypeHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].prototypeHClass;
}
625
// Returns the prototype-of-prototype hclass recorded for the given type.
JSHClass *JSThread::GetBuiltinPrototypeOfPrototypeHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].prototypeOfPrototypeHClass;
}
631
// Returns the byte offset (from the JSThread base) of the builtin hclass
// entry for the given type; used by compiled-code accesses through glue.
size_t JSThread::GetBuiltinHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinHClassOffset(type, isArch32);
}
636
// Returns the byte offset (from the JSThread base) of the builtin prototype
// hclass entry for the given type; used by compiled-code accesses via glue.
size_t JSThread::GetBuiltinPrototypeHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinPrototypeHClassOffset(type, isArch32);
}
641
CheckSwitchDebuggerBCStub()642 void JSThread::CheckSwitchDebuggerBCStub()
643 {
644 auto isDebug = GetEcmaVM()->GetJsDebuggerManager()->IsDebugMode();
645 if (LIKELY(!isDebug)) {
646 if (glueData_.bcStubEntries_.Get(0) == glueData_.bcStubEntries_.Get(1)) {
647 for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
648 auto stubEntry = glueData_.bcDebuggerStubEntries_.Get(i);
649 auto debuggerStubEbtry = glueData_.bcStubEntries_.Get(i);
650 glueData_.bcStubEntries_.Set(i, stubEntry);
651 glueData_.bcDebuggerStubEntries_.Set(i, debuggerStubEbtry);
652 }
653 }
654 } else {
655 if (glueData_.bcDebuggerStubEntries_.Get(0) == glueData_.bcDebuggerStubEntries_.Get(1)) {
656 for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
657 auto stubEntry = glueData_.bcStubEntries_.Get(i);
658 auto debuggerStubEbtry = glueData_.bcDebuggerStubEntries_.Get(i);
659 glueData_.bcDebuggerStubEntries_.Set(i, stubEntry);
660 glueData_.bcStubEntries_.Set(i, debuggerStubEbtry);
661 }
662 }
663 }
664 }
665
// Switches the bytecode stub table between NORMAL and PROFILE variants to
// match the PGO profiler setting. The macro expansion swaps each
// profiler-instrumented handler with its plain counterpart; because the
// operation is a swap, the same expansion toggles in both directions.
void JSThread::CheckOrSwitchPGOStubs()
{
    bool isSwitch = false;
    if (IsPGOProfilerEnable()) {
        if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
            SetBCStubStatus(BCStubStatus::PROFILE_BC_STUB);
            isSwitch = true;
        }
    } else {
        if (GetBCStubStatus() == BCStubStatus::PROFILE_BC_STUB) {
            SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
            isSwitch = true;
        }
    }
    if (isSwitch) {
        Address curAddress;
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                  \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                               \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}
690
// Installs profiling bytecode stubs for the JIT. With PGO enabled this
// delegates to CheckOrSwitchPGOStubs; otherwise it swaps in the JIT-profile
// handler variants (one-way: only from the NORMAL table state).
void JSThread::SwitchJitProfileStubs(bool isEnablePgo)
{
    if (isEnablePgo) {
        SetPGOProfilerEnable(true);
        CheckOrSwitchPGOStubs();
        return;
    }
    bool isSwitch = false;
    if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
        SetBCStubStatus(BCStubStatus::JIT_PROFILE_BC_STUB);
        isSwitch = true;
    }
    if (isSwitch) {
        Address curAddress;
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                  \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                               \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_JIT_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}
713
// Forces termination of JS execution by installing a TERMINATION_ERROR as
// the pending exception (unwound by the interpreter like any other throw).
void JSThread::TerminateExecution()
{
    // set the TERMINATE_ERROR to exception
    ObjectFactory *factory = GetEcmaVM()->GetFactory();
    JSHandle<JSObject> error = factory->GetJSError(ErrorType::TERMINATION_ERROR,
                                                   "Terminate execution!", StackCheck::NO);
    SetException(error.GetTaggedValue());
}
722
// Passes the suspend-all barrier if the ACTIVE_BARRIER flag is currently set
// on this thread's state-and-flags word.
void JSThread::CheckAndPassActiveBarrier()
{
    ThreadStateAndFlags oldStateAndFlags;
    oldStateAndFlags.asNonvolatileInt = glueData_.stateAndFlags_.asInt;
    if ((oldStateAndFlags.asNonvolatileStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
        PassSuspendBarrier();
    }
}
731
// Signals the suspend barrier (if one is installed), clears it together with
// the ACTIVE_BARRIER flag, and returns whether a barrier was passed.
bool JSThread::PassSuspendBarrier()
{
    // Use suspendLock_ to avoid data-race between suspend-all-thread and suspended-threads.
    LockHolder lock(suspendLock_);
    if (suspendBarrier_ != nullptr) {
        suspendBarrier_->PassStrongly();
        suspendBarrier_ = nullptr;
        ClearFlag(ThreadFlag::ACTIVE_BARRIER);
        return true;
    }
    return false;
}
744
// Returns true when a finished concurrent mark should be processed at the
// next safepoint: marking is done and was concurrently triggered, and the
// heap is not serializing, in a sensitive state, or startup-suppressed.
bool JSThread::ShouldHandleMarkingFinishedInSafepoint()
{
    auto heap = const_cast<Heap *>(GetEcmaVM()->GetHeap());
    return IsMarkFinished() && heap->GetConcurrentMarker()->IsTriggeredConcurrentMark() &&
        !heap->GetOnSerializeEvent() && !heap->InSensitiveStatus() && !heap->CheckIfNeedStopCollectionByStartup();
}
751
// Safepoint poll: services pending termination/suspension requests, installs
// JIT machine code, optionally starts CPU profiling, and triggers GC work.
// Returns true if a GC was triggered during this poll.
bool JSThread::CheckSafepoint()
{
    ResetCheckSafePointStatus();

    if UNLIKELY(HasTerminationRequest()) {
        TerminateExecution();
        SetVMTerminated(true);
        SetTerminationRequest(false);
    }

    if UNLIKELY(HasSuspendRequest()) {
        WaitSuspension();
    }

    // vmThreadControl_ 's thread_ is current JSThread's this.
    if UNLIKELY(VMNeedSuspension()) {
        vmThreadControl_->SuspendVM();
    }
    if (HasInstallMachineCode()) {
        vm_->GetJit()->InstallTasks(this);
        SetInstallMachineCode(false);
    }

#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    if UNLIKELY(needProfiling_.load() && !isProfiling_) {
        DFXJSNApi::StartCpuProfilerForFile(vm_, profileName_, CpuProfiler::INTERVAL_OF_INNER_START);
        SetNeedProfiling(false);
    }
#endif // ECMASCRIPT_SUPPORT_CPUPROFILER
    bool gcTriggered = false;
#ifndef NDEBUG
    // Debug builds may force a full GC at every safepoint to shake out bugs.
    if (vm_->GetJSOptions().EnableForceGC()) {
        GetEcmaVM()->CollectGarbage(TriggerGCType::FULL_GC);
        gcTriggered = true;
    }
#endif
    auto heap = const_cast<Heap *>(GetEcmaVM()->GetHeap());
    // Handle exit app senstive scene
    heap->HandleExitHighSensitiveEvent();

    // Do not trigger local gc during the shared gc processRset process.
    if UNLIKELY(IsProcessingLocalToSharedRset()) {
        return false;
    }
    // After concurrent mark finish, should trigger gc here to avoid create much floating garbage
    // except in serialize or high sensitive event
    if UNLIKELY(ShouldHandleMarkingFinishedInSafepoint()) {
        heap->SetCanThrowOOMError(false);
        heap->GetConcurrentMarker()->HandleMarkingFinished();
        heap->SetCanThrowOOMError(true);
        gcTriggered = true;
    }
    return gcTriggered;
}
806
// Debug validation: aborts the process if value is a heap object pointer
// that is no longer alive in the heap.
void JSThread::CheckJSTaggedType(JSTaggedType value) const
{
    if (JSTaggedValue(value).IsHeapObject() &&
        !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
        LOG_FULL(FATAL) << "value:" << value << " is invalid!";
    }
}
814
CpuProfilerCheckJSTaggedType(JSTaggedType value) const815 bool JSThread::CpuProfilerCheckJSTaggedType(JSTaggedType value) const
816 {
817 if (JSTaggedValue(value).IsHeapObject() &&
818 !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
819 return false;
820 }
821 return true;
822 }
823
824 // static
GetAsmStackLimit()825 size_t JSThread::GetAsmStackLimit()
826 {
827 #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
828 // js stack limit
829 uintptr_t currentStackPos = GetCurrentStackPosition();
830 size_t defaultStackSize = EcmaParamConfiguration::GetDefalutStackSize();
831 if (currentStackPos < defaultStackSize) {
832 LOG_FULL(FATAL) << "Too small stackSize to run jsvm"
833 << ", currentStackPos: " << reinterpret_cast<void *>(currentStackPos);
834 }
835 size_t result = currentStackPos - defaultStackSize;
836 int ret = -1;
837 void *stackAddr = nullptr;
838 size_t size = 0;
839 #if defined(ENABLE_FFRT_INTERFACES)
840 if (!ffrt_get_current_coroutine_stack(&stackAddr, &size)) {
841 pthread_attr_t attr;
842 ret = pthread_getattr_np(pthread_self(), &attr);
843 if (ret != 0) {
844 LOG_ECMA(ERROR) << "Get current thread attr failed";
845 return result;
846 }
847 ret = pthread_attr_getstack(&attr, &stackAddr, &size);
848 if (pthread_attr_destroy(&attr) != 0) {
849 LOG_ECMA(ERROR) << "Destroy current thread attr failed";
850 }
851 if (ret != 0) {
852 LOG_ECMA(ERROR) << "Get current thread stack size failed";
853 return result;
854 }
855 }
856 #else
857 pthread_attr_t attr;
858 ret = pthread_getattr_np(pthread_self(), &attr);
859 if (ret != 0) {
860 LOG_ECMA(ERROR) << "Get current thread attr failed";
861 return result;
862 }
863 ret = pthread_attr_getstack(&attr, &stackAddr, &size);
864 if (pthread_attr_destroy(&attr) != 0) {
865 LOG_ECMA(ERROR) << "Destroy current thread attr failed";
866 }
867 if (ret != 0) {
868 LOG_ECMA(ERROR) << "Get current thread stack size failed";
869 return result;
870 }
871 #endif
872
873 bool isMainThread = IsMainThread();
874 uintptr_t threadStackLimit = reinterpret_cast<uintptr_t>(stackAddr);
875 uintptr_t threadStackStart = threadStackLimit + size;
876 if (isMainThread) {
877 struct rlimit rl;
878 ret = getrlimit(RLIMIT_STACK, &rl);
879 if (ret != 0) {
880 LOG_ECMA(ERROR) << "Get current thread stack size failed";
881 return result;
882 }
883 if (rl.rlim_cur > DEFAULT_MAX_SYSTEM_STACK_SIZE) {
884 LOG_ECMA(ERROR) << "Get current thread stack size exceed " << DEFAULT_MAX_SYSTEM_STACK_SIZE
885 << " : " << rl.rlim_cur;
886 return result;
887 }
888 threadStackLimit = threadStackStart - rl.rlim_cur;
889 }
890
891 if (result < threadStackLimit) {
892 result = threadStackLimit;
893 }
894
895 LOG_INTERPRETER(DEBUG) << "Current thread stack start: " << reinterpret_cast<void *>(threadStackStart);
896 LOG_INTERPRETER(DEBUG) << "Used stack before js stack start: "
897 << reinterpret_cast<void *>(threadStackStart - currentStackPos);
898 LOG_INTERPRETER(DEBUG) << "Current thread asm stack limit: " << reinterpret_cast<void *>(result);
899 uintptr_t currentThreadAsmStackLimit = result;
900 // To avoid too much times of stack overflow checking, we only check stack overflow before push vregs or
901 // parameters of variable length. So we need a reserved size of stack to make sure stack won't be overflowed
902 // when push other data.
903 result += EcmaParamConfiguration::GetDefaultReservedStackSize();
904 if (threadStackStart <= result) {
905 LOG_FULL(FATAL) << "Too small stackSize to run jsvm"
906 << ", CurrentStackPosition: " << reinterpret_cast<void *>(currentStackPos)
907 << ", StackAddr: " << stackAddr << ", Size: " << reinterpret_cast<void *>(size)
908 << ", ThreadStackLimit: " << reinterpret_cast<void *>(threadStackLimit)
909 << ", ThreadStackStart: " << reinterpret_cast<void *>(threadStackStart)
910 << ", Used stack before js stack start: "
911 << reinterpret_cast<void *>(threadStackStart - currentStackPos)
912 << ", Current thread asm stack limit: " << reinterpret_cast<void *>(currentThreadAsmStackLimit)
913 << ", Result: " << reinterpret_cast<void *>(result);
914 }
915 return result;
916 #else
917 return 0;
918 #endif
919 }
920
IsLegalAsmSp(uintptr_t sp) const921 bool JSThread::IsLegalAsmSp(uintptr_t sp) const
922 {
923 uint64_t bottom = GetStackLimit() - EcmaParamConfiguration::GetDefaultReservedStackSize();
924 uint64_t top = GetStackStart() + EcmaParamConfiguration::GetAllowedUpperStackDiff();
925 return (bottom <= sp && sp <= top);
926 }
927
IsLegalThreadSp(uintptr_t sp) const928 bool JSThread::IsLegalThreadSp(uintptr_t sp) const
929 {
930 uintptr_t bottom = reinterpret_cast<uintptr_t>(glueData_.frameBase_);
931 size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
932 uintptr_t top = bottom + maxStackSize;
933 return (bottom <= sp && sp <= top);
934 }
935
IsLegalSp(uintptr_t sp) const936 bool JSThread::IsLegalSp(uintptr_t sp) const
937 {
938 return IsLegalAsmSp(sp) || IsLegalThreadSp(sp);
939 }
940
IsMainThread()941 bool JSThread::IsMainThread()
942 {
943 #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
944 return getpid() == syscall(SYS_gettid);
945 #else
946 return true;
947 #endif
948 }
949
PushContext(EcmaContext * context)950 void JSThread::PushContext(EcmaContext *context)
951 {
952 const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
953 contexts_.emplace_back(context);
954
955 if (!glueData_.currentContext_) {
956 // The first context in ecma vm.
957 glueData_.currentContext_ = context;
958 }
959 }
960
PopContext()961 void JSThread::PopContext()
962 {
963 contexts_.pop_back();
964 glueData_.currentContext_ = contexts_.back();
965 }
966
// Switches the active EcmaContext to `currentContext`, migrating the cached
// GlobalEnv / global-object copies between GlueData and the contexts.
// `isInIterate` is true when called during GC iteration, in which case the
// global constants pointer must not be swapped.
void JSThread::SwitchCurrentContext(EcmaContext *currentContext, bool isInIterate)
{
    ASSERT(std::count(contexts_.begin(), contexts_.end(), currentContext));
    // Save the glue-side GlobalEnv back into the outgoing context.
    glueData_.currentContext_->SetGlobalEnv(GetGlueGlobalEnv());
    // When the glueData_.currentContext_ is not fully initialized, glueData_.globalObject_ will be hole.
    // Assigning hole to JSGlobalObject could cause a mistake at builtins initialization.
    if (!glueData_.globalObject_.IsHole()) {
        glueData_.currentContext_->GetGlobalEnv()->SetJSGlobalObject(this, glueData_.globalObject_);
    }
    if (!currentContext->GlobalEnvIsHole()) {
        SetGlueGlobalEnv(*(currentContext->GetGlobalEnv()));
        /**
         * GlobalObject has two copies, one in GlueData and one in Context.GlobalEnv; when switching context, we save
         * the GlobalObject in GlueData to CurrentContext.GlobalEnv (is this necessary?), and then switch to the new
         * context and save the GlobalObject in NewContext.GlobalEnv to GlueData.
         * The initial value of GlobalObject in Context.GlobalEnv is Undefined, but in GlueData it is Hole,
         * so if two SharedGCs happen during builtins initialization like this, it may cause an incorrect scene:
         *
         * Default:
         * Slot for GlobalObject:   Context.GlobalEnv       GlueData
         * value:                   Undefined               Hole
         *
         * First SharedGC(JSThread::SwitchCurrentContext), Set GlobalObject from Context.GlobalEnv to GlueData:
         * Slot for GlobalObject:   Context.GlobalEnv       GlueData
         * value:                   Undefined               Undefined
         *
         * Builtins Initialize, Create GlobalObject and Set to Context.GlobalEnv:
         * Slot for GlobalObject:   Context.GlobalEnv       GlueData
         * value:                   Obj                     Undefined
         *
         * Second SharedGC(JSThread::SwitchCurrentContext), Set GlobalObject from GlueData to Context.GlobalEnv:
         * Slot for GlobalObject:   Context.GlobalEnv       GlueData
         * value:                   Undefined               Undefined
         *
         * So when copying values between Context.GlobalEnv and GlueData, we need to check if the value is Hole in
         * GlueData, and if it is Undefined in Context.GlobalEnv, because the initial values differ.
         */
        if (!currentContext->GetGlobalEnv()->GetGlobalObject().IsUndefined()) {
            SetGlobalObject(currentContext->GetGlobalEnv()->GetGlobalObject());
        }
    }
    if (!isInIterate) {
        // If isInIterate is true, it means it is in GC iterate and global variables are no need to change.
        glueData_.globalConst_ = const_cast<GlobalEnvConstants *>(currentContext->GlobalConstants());
    }

    glueData_.currentContext_ = currentContext;
}
1015
EraseContext(EcmaContext * context)1016 bool JSThread::EraseContext(EcmaContext *context)
1017 {
1018 const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
1019 bool isCurrentContext = false;
1020 auto iter = std::find(contexts_.begin(), contexts_.end(), context);
1021 if (*iter == context) {
1022 if (glueData_.currentContext_ == context) {
1023 isCurrentContext = true;
1024 }
1025 contexts_.erase(iter);
1026 if (isCurrentContext) {
1027 SwitchCurrentContext(contexts_.back());
1028 }
1029 return true;
1030 }
1031 return false;
1032 }
1033
ClearContextCachedConstantPool()1034 void JSThread::ClearContextCachedConstantPool()
1035 {
1036 for (EcmaContext *context : contexts_) {
1037 context->ClearCachedConstantPool();
1038 }
1039 }
1040
GetPropertiesCache() const1041 PropertiesCache *JSThread::GetPropertiesCache() const
1042 {
1043 return glueData_.currentContext_->GetPropertiesCache();
1044 }
1045
GetLoadMegaICCache() const1046 MegaICCache *JSThread::GetLoadMegaICCache() const
1047 {
1048 return glueData_.currentContext_->GetLoadMegaICCache();
1049 }
1050
GetStoreMegaICCache() const1051 MegaICCache *JSThread::GetStoreMegaICCache() const
1052 {
1053 return glueData_.currentContext_->GetStoreMegaICCache();
1054 }
1055
1056
GetFirstGlobalConst() const1057 const GlobalEnvConstants *JSThread::GetFirstGlobalConst() const
1058 {
1059 return contexts_[0]->GlobalConstants();
1060 }
1061
// Reports whether context initialization has completed.
// NOTE(review): despite the name, only the most recently pushed context is
// inspected — confirm whether every entry in contexts_ should be checked.
bool JSThread::IsAllContextsInitialized() const
{
    return contexts_.back()->IsInitialized();
}
1066
IsReadyToUpdateDetector() const1067 bool JSThread::IsReadyToUpdateDetector() const
1068 {
1069 return !GetEnableLazyBuiltins() && IsAllContextsInitialized();
1070 }
1071
GetOrCreateRegExpCache()1072 Area *JSThread::GetOrCreateRegExpCache()
1073 {
1074 if (regExpCache_ == nullptr) {
1075 regExpCache_ = nativeAreaAllocator_->AllocateArea(MAX_REGEXP_CACHE_SIZE);
1076 }
1077 return regExpCache_;
1078 }
1079
InitializeBuiltinObject(const std::string & key)1080 void JSThread::InitializeBuiltinObject(const std::string& key)
1081 {
1082 BuiltinIndex& builtins = BuiltinIndex::GetInstance();
1083 auto index = builtins.GetBuiltinIndex(key);
1084 ASSERT(index != BuiltinIndex::NOT_FOUND);
1085 /*
1086 If using `auto globalObject = GetEcmaVM()->GetGlobalEnv()->GetGlobalObject()` here,
1087 it will cause incorrect result in multi-context environment. For example:
1088
1089 ```ts
1090 let obj = {};
1091 print(obj instanceof Object); // instead of true, will print false
1092 ```
1093 */
1094 auto globalObject = contexts_.back()->GetGlobalEnv()->GetGlobalObject();
1095 auto jsObject = JSHandle<JSObject>(this, globalObject);
1096 auto box = jsObject->GetGlobalPropertyBox(this, key);
1097 if (box == nullptr) {
1098 return;
1099 }
1100 auto& entry = glueData_.builtinEntries_.builtin_[index];
1101 entry.box_ = JSTaggedValue::Cast(box);
1102 auto builtin = JSHandle<JSObject>(this, box->GetValue());
1103 auto hclass = builtin->GetJSHClass();
1104 entry.hClass_ = JSTaggedValue::Cast(hclass);
1105 }
1106
InitializeBuiltinObject()1107 void JSThread::InitializeBuiltinObject()
1108 {
1109 BuiltinIndex& builtins = BuiltinIndex::GetInstance();
1110 for (auto key: builtins.GetBuiltinKeys()) {
1111 InitializeBuiltinObject(key);
1112 }
1113 }
1114
IsPropertyCacheCleared() const1115 bool JSThread::IsPropertyCacheCleared() const
1116 {
1117 for (EcmaContext *context : contexts_) {
1118 if (!context->GetPropertiesCache()->IsCleared()) {
1119 return false;
1120 }
1121 }
1122 return true;
1123 }
1124
UpdateState(ThreadState newState)1125 void JSThread::UpdateState(ThreadState newState)
1126 {
1127 ThreadState oldState = GetState();
1128 if (oldState == ThreadState::RUNNING && newState != ThreadState::RUNNING) {
1129 TransferFromRunningToSuspended(newState);
1130 } else if (oldState != ThreadState::RUNNING && newState == ThreadState::RUNNING) {
1131 TransferToRunning();
1132 } else {
1133 // Here can be some extra checks...
1134 StoreState(newState);
1135 }
1136 }
1137
SuspendThread(bool internalSuspend,SuspendBarrier * barrier)1138 void JSThread::SuspendThread(bool internalSuspend, SuspendBarrier* barrier)
1139 {
1140 LockHolder lock(suspendLock_);
1141 if (!internalSuspend) {
1142 // do smth here if we want to combine internal and external suspension
1143 }
1144
1145 uint32_t old_count = suspendCount_++;
1146 if (old_count == 0) {
1147 SetFlag(ThreadFlag::SUSPEND_REQUEST);
1148 SetCheckSafePointStatus();
1149 }
1150
1151 if (barrier != nullptr) {
1152 ASSERT(suspendBarrier_ == nullptr);
1153 suspendBarrier_ = barrier;
1154 SetFlag(ThreadFlag::ACTIVE_BARRIER);
1155 SetCheckSafePointStatus();
1156 }
1157 }
1158
ResumeThread(bool internalSuspend)1159 void JSThread::ResumeThread(bool internalSuspend)
1160 {
1161 LockHolder lock(suspendLock_);
1162 if (!internalSuspend) {
1163 // do smth here if we want to combine internal and external suspension
1164 }
1165 if (suspendCount_ > 0) {
1166 suspendCount_--;
1167 if (suspendCount_ == 0) {
1168 ClearFlag(ThreadFlag::SUSPEND_REQUEST);
1169 ResetCheckSafePointStatus();
1170 }
1171 }
1172 suspendCondVar_.Signal();
1173 }
1174
// Parks the current thread until every pending suspension request has been
// resumed. The state is published as IS_SUSPENDED for the duration so
// suspenders observe this thread as stopped, then restored afterwards.
void JSThread::WaitSuspension()
{
    constexpr int TIMEOUT = 100;
    ThreadState oldState = GetState();
    UpdateState(ThreadState::IS_SUSPENDED);
    {
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitSuspension");
        LockHolder lock(suspendLock_);
        // Timed wait so the loop periodically re-checks suspendCount_.
        while (suspendCount_ > 0) {
            suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            // we need to do smth if Runtime is terminating at this point
        }
        ASSERT(!HasSuspendRequest());
    }
    UpdateState(oldState);
}
1191
// Enters managed (JS) code by moving the thread to RUNNING.
// The thread must not already be in the managed state.
void JSThread::ManagedCodeBegin()
{
    ASSERT(!IsInManagedState());
    UpdateState(ThreadState::RUNNING);
}
1197
// Leaves managed (JS) code by moving the thread back to NATIVE.
// The thread must currently be in the managed state.
void JSThread::ManagedCodeEnd()
{
    ASSERT(IsInManagedState());
    UpdateState(ThreadState::NATIVE);
}
1203
// Leaves RUNNING for `newState`, then acknowledges any pending suspend-all
// barrier. Must be invoked on the thread itself.
void JSThread::TransferFromRunningToSuspended(ThreadState newState)
{
    ASSERT(currentThread == this);
    StoreSuspendedState(newState);
    CheckAndPassActiveBarrier();
}
1210
UpdateStackInfo(void * stackInfo,StackInfoOpKind opKind)1211 void JSThread::UpdateStackInfo(void *stackInfo, StackInfoOpKind opKind)
1212 {
1213 switch (opKind) {
1214 case SwitchToSubStackInfo: {
1215 StackInfo *subStackInfo = reinterpret_cast<StackInfo*>(stackInfo);
1216 if (subStackInfo == nullptr) {
1217 LOG_ECMA(ERROR) << "fatal error, subStack not exist";
1218 break;
1219 }
1220 // process stackLimit
1221 mainStackInfo_.stackLimit = glueData_.stackLimit_;
1222 glueData_.stackLimit_ = subStackInfo->stackLimit;
1223 // process lastLeaveFrame
1224 mainStackInfo_.lastLeaveFrame = reinterpret_cast<uint64_t>(glueData_.leaveFrame_);
1225 glueData_.leaveFrame_ =
1226 reinterpret_cast<uint64_t *>(subStackInfo->lastLeaveFrame);
1227 isInSubStack_ = true;
1228
1229 LOG_ECMA(DEBUG) << "Switch to subStack: "
1230 << ", stack limit: " << glueData_.stackLimit_
1231 << ", stack lastLeaveFrame: " << glueData_.leaveFrame_;
1232 break;
1233 }
1234 case SwitchToMainStackInfo: {
1235 // process stackLimit
1236 glueData_.stackLimit_ = mainStackInfo_.stackLimit;
1237 // process lastLeaveFrame
1238 glueData_.leaveFrame_ = reinterpret_cast<uint64_t *>(mainStackInfo_.lastLeaveFrame);
1239 isInSubStack_ = false;
1240
1241 LOG_ECMA(DEBUG) << "Switch to mainStack: "
1242 << ", main stack limit: " << mainStackInfo_.stackLimit
1243 << ", main stack lastLeaveFrame: " << mainStackInfo_.lastLeaveFrame;
1244 break;
1245 }
1246 default:
1247 LOG_ECMA(FATAL) << "this branch is unreachable";
1248 UNREACHABLE();
1249 }
1250 }
1251
// Transitions a non-daemon thread to RUNNING, then performs mutator-side work
// that must happen on the running thread: freeing weak global nodes and
// re-attempting a deferred full-mark request.
void JSThread::TransferToRunning()
{
    ASSERT(!IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
    // Invoke free weak global callback when thread switch to running
    if (!weakNodeFreeGlobalCallbacks_.empty()) {
        InvokeWeakNodeFreeGlobalCallBack();
    }
    if (fullMarkRequest_) {
        // Keep the request pending if the trigger attempt fails.
        fullMarkRequest_ = const_cast<Heap*>(vm_->GetHeap())->TryTriggerFullMarkBySharedLimit();
    }
}
1265
// Daemon-thread variant of TransferToRunning: only publishes the RUNNING
// state, without the mutator-side cleanup work.
void JSThread::TransferDaemonThreadToRunning()
{
    ASSERT(IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
}
1272
// Atomically replaces the state field of the packed state-and-flags word
// while preserving the current flag bits. A CAS loop is required because
// other threads may concurrently toggle flags in the same word.
inline void JSThread::StoreState(ThreadState newState)
{
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asNonvolatileInt = glueData_.stateAndFlags_.asInt;

        ThreadStateAndFlags newStateAndFlags;
        // Carry over existing flags; only the state field changes.
        newStateAndFlags.asNonvolatileStruct.flags = oldStateAndFlags.asNonvolatileStruct.flags;
        newStateAndFlags.asNonvolatileStruct.state = newState;

        bool done = glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                               newStateAndFlags.asNonvolatileInt,
                                                                               std::memory_order_release);
        if (LIKELY(done)) {
            break;
        }
    }
}
1291
// Publishes the RUNNING state, but only once no flags are pending: an active
// barrier must be passed and any suspend request must be waited out before
// this thread may enter managed code. Retries until the CAS succeeds.
void JSThread::StoreRunningState(ThreadState newState)
{
    ASSERT(newState == ThreadState::RUNNING);
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asNonvolatileInt = glueData_.stateAndFlags_.asInt;
        ASSERT(oldStateAndFlags.asNonvolatileStruct.state != ThreadState::RUNNING);

        if (LIKELY(oldStateAndFlags.asNonvolatileStruct.flags == ThreadFlag::NO_FLAGS)) {
            // No pending flags: try to CAS the state to RUNNING.
            ThreadStateAndFlags newStateAndFlags;
            newStateAndFlags.asNonvolatileStruct.flags = oldStateAndFlags.asNonvolatileStruct.flags;
            newStateAndFlags.asNonvolatileStruct.state = newState;

            if (glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                           newStateAndFlags.asNonvolatileInt,
                                                                           std::memory_order_release)) {
                break;
            }
        } else if ((oldStateAndFlags.asNonvolatileStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
            // A suspend-all barrier is active: acknowledge it, then retry.
            PassSuspendBarrier();
        } else if ((oldStateAndFlags.asNonvolatileStruct.flags & ThreadFlag::SUSPEND_REQUEST) != 0) {
            // Wait until all suspension requests have been resumed, then retry.
            constexpr int TIMEOUT = 100;
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::StoreRunningState");
            LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            }
            ASSERT(!HasSuspendRequest());
        }
    }
}
1323
// Stores any non-RUNNING state. Transitions into RUNNING must go through
// StoreRunningState, which handles pending suspend/barrier flags.
inline void JSThread::StoreSuspendedState(ThreadState newState)
{
    ASSERT(newState != ThreadState::RUNNING);
    StoreState(newState);
}
1329
// Re-initializes per-thread bookkeeping in the child process after fork().
void JSThread::PostFork()
{
    SetThreadId();
    if (currentThread == nullptr) {
        // Fork happened on a different OS thread: adopt this JSThread on the
        // current thread and move it out of the CREATED state.
        currentThread = this;
        ASSERT(GetState() == ThreadState::CREATED);
        UpdateState(ThreadState::NATIVE);
    } else {
        // We tried to call fork in the same thread
        ASSERT(currentThread == this);
        ASSERT(GetState() == ThreadState::NATIVE);
    }
}
1343 #ifndef NDEBUG
IsInManagedState() const1344 bool JSThread::IsInManagedState() const
1345 {
1346 ASSERT(this == JSThread::GetCurrent());
1347 return GetState() == ThreadState::RUNNING;
1348 }
1349
// Debug-only: returns the mutator lock state recorded for this thread.
MutatorLock::MutatorLockState JSThread::GetMutatorLockState() const
{
    return mutatorLockState_;
}
1354
// Debug-only: records the mutator lock state for this thread.
void JSThread::SetMutatorLockState(MutatorLock::MutatorLockState newState)
{
    mutatorLockState_ = newState;
}
1359 #endif
1360 } // namespace panda::ecmascript
1361