1 /*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/js_thread.h"
17
18 #include "ecmascript/runtime.h"
19 #include "ecmascript/debugger/js_debugger_manager.h"
20 #include "ecmascript/js_date.h"
21 #include "ecmascript/js_object-inl.h"
22 #include "ecmascript/js_tagged_value.h"
23 #include "ecmascript/runtime_call_id.h"
24
25 #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
26 #include <sys/resource.h>
27 #endif
28
29 #if defined(ENABLE_EXCEPTION_BACKTRACE)
30 #include "ecmascript/platform/backtrace.h"
31 #endif
32 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
33 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
34 #endif
35 #include "ecmascript/dfx/vm_thread_control.h"
36 #include "ecmascript/ecma_global_storage.h"
37 #include "ecmascript/ic/properties_cache.h"
38 #include "ecmascript/interpreter/interpreter.h"
39 #include "ecmascript/mem/concurrent_marker.h"
40 #include "ecmascript/platform/file.h"
41 #include "ecmascript/jit/jit.h"
42
43 namespace panda::ecmascript {
44 using CommonStubCSigns = panda::ecmascript::kungfu::CommonStubCSigns;
45 using BytecodeStubCSigns = panda::ecmascript::kungfu::BytecodeStubCSigns;
46
// Per-OS-thread pointer to the JSThread currently attached to this native thread
// (nullptr when no JSThread is bound).
thread_local JSThread *currentThread = nullptr;
48
// Returns the JSThread bound to the calling native thread, or nullptr if none.
JSThread *JSThread::GetCurrent()
{
    return currentThread;
}
53
54 // static
RegisterThread(JSThread * jsThread)55 void JSThread::RegisterThread(JSThread *jsThread)
56 {
57 Runtime::GetInstance()->RegisterThread(jsThread);
58 // If it is not true, we created a new thread for future fork
59 if (currentThread == nullptr) {
60 currentThread = jsThread;
61 jsThread->UpdateState(ThreadState::NATIVE);
62 }
63 }
64
UnregisterThread(JSThread * jsThread)65 void JSThread::UnregisterThread(JSThread *jsThread)
66 {
67 if (currentThread == jsThread) {
68 jsThread->UpdateState(ThreadState::TERMINATED);
69 currentThread = nullptr;
70 } else {
71 // We have created this JSThread instance but hadn't forked it.
72 ASSERT(jsThread->GetState() == ThreadState::CREATED);
73 jsThread->UpdateState(ThreadState::TERMINATED);
74 }
75 Runtime::GetInstance()->UnregisterThread(jsThread);
76 }
77
// static
// Factory: builds a fully initialized JSThread for vm — allocates the
// interpreter frame area, records stack bounds, and registers the thread.
JSThread *JSThread::Create(EcmaVM *vm)
{
    auto jsThread = new JSThread(vm);

    AsmInterParsedOption asmInterOpt = vm->GetJSOptions().GetAsmInterParsedOption();
    if (asmInterOpt.enableAsm) {
        jsThread->EnableAsmInterpreter();
    }

    jsThread->nativeAreaAllocator_ = vm->GetNativeAreaAllocator();
    jsThread->heapRegionAllocator_ = vm->GetHeapRegionAllocator();
    // align with 16
    size_t maxStackSize = vm->GetEcmaParamConfiguration().GetMaxStackSize();
    jsThread->glueData_.frameBase_ = static_cast<JSTaggedType *>(
        vm->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize));
    // The interpreter stack grows downwards: currentFrame_ starts at the top.
    jsThread->glueData_.currentFrame_ = jsThread->glueData_.frameBase_ + maxStackSize;
    EcmaInterpreter::InitStackFrame(jsThread);

    jsThread->glueData_.stackLimit_ = GetAsmStackLimit();
    jsThread->glueData_.stackStart_ = GetCurrentStackPosition();
    jsThread->glueData_.isEnableElementsKind_ = vm->IsEnableElementsKind();
    jsThread->SetThreadId();

    RegisterThread(jsThread);
    return jsThread;
}
105
// Main constructor: wires the global-handle operations (new/dispose/weak) to
// either the regular storage or, when global leak check is enabled, the debug
// storage whose DebugNode carries extra bookkeeping for leak reports.
JSThread::JSThread(EcmaVM *vm) : id_(os::thread::GetCurrentThreadId()), vm_(vm)
{
    auto chunk = vm->GetChunk();
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_ = chunk->New<EcmaGlobalStorage<Node>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                          WeakClearCallback nativeFinalizeCallBack) {
            return globalStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalStorage_->IsWeak(addr); };
    } else {
        // Leak-check build path: same interface, debug-instrumented nodes.
        globalDebugStorage_ = chunk->New<EcmaGlobalStorage<DebugNode>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalDebugStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalDebugStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                          WeakClearCallback nativeFinalizeCallBack) {
            return globalDebugStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalDebugStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalDebugStorage_->IsWeak(addr); };
    }
    vmThreadControl_ = new VmThreadControl(this);
    SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
    dateUtils_ = new DateUtils();
}
134
JSThread(EcmaVM * vm,ThreadType threadType)135 JSThread::JSThread(EcmaVM *vm, ThreadType threadType) : id_(os::thread::GetCurrentThreadId()),
136 vm_(vm), threadType_(threadType)
137 {
138 ASSERT(threadType == ThreadType::JIT_THREAD);
139 // jit thread no need GCIterating
140 readyForGCIterating_ = false;
141 RegisterThread(this);
142 };
143
// Constructor for the daemon thread. Unlike the JIT-thread constructor it does
// not call RegisterThread here; note no vm_ is bound on this path.
JSThread::JSThread(ThreadType threadType) : threadType_(threadType)
{
    ASSERT(threadType == ThreadType::DAEMON_THREAD);
    // daemon thread no need GCIterating
    readyForGCIterating_ = false;
}
150
// Destructor: releases global-handle storages, per-context frame areas, the
// regexp cache area, and control structures. Teardown order matters: storages
// and contexts are freed before the allocator pointers are nulled out.
JSThread::~JSThread()
{
    readyForGCIterating_ = false;
    if (globalStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalStorage_);
        globalStorage_ = nullptr;
    }
    if (globalDebugStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalDebugStorage_);
        globalDebugStorage_ = nullptr;
    }

    // Each context owns a frame area sized like the one allocated in Create().
    for (auto item : contexts_) {
        GetNativeAreaAllocator()->Free(item->GetFrameBase(), sizeof(JSTaggedType) *
            vm_->GetEcmaParamConfiguration().GetMaxStackSize());
        item->SetFrameBase(nullptr);
        delete item;
    }
    contexts_.clear();
    GetNativeAreaAllocator()->FreeArea(regExpCache_);

    glueData_.frameBase_ = nullptr;
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;
    regExpCache_ = nullptr;
    if (vmThreadControl_ != nullptr) {
        delete vmThreadControl_;
        vmThreadControl_ = nullptr;
    }
    // DaemonThread will be unregistered when the binding std::thread release.
    if (!IsDaemonThread()) {
        UnregisterThread(this);
    }
    if (dateUtils_ != nullptr) {
        delete dateUtils_;
        dateUtils_ = nullptr;
    }
}
189
// Stores the pending exception in glue data; optionally logs a native
// backtrace when the backtrace option is enabled at build and runtime.
void JSThread::SetException(JSTaggedValue exception)
{
    glueData_.exception_ = exception;
#if defined(ENABLE_EXCEPTION_BACKTRACE)
    if (vm_->GetJSOptions().EnableExceptionBacktrace()) {
        LOG_ECMA(INFO) << "SetException:" << exception.GetRawData();
        std::ostringstream stack;
        Backtrace(stack);
        LOG_ECMA(INFO) << stack.str();
    }
#endif
}
202
// Clears the pending exception; Hole is the "no exception" sentinel
// (Iterate() skips the slot when it holds Hole).
void JSThread::ClearException()
{
    glueData_.exception_ = JSTaggedValue::Hole();
}
207
// Returns the lexical environment of the topmost interpreted frame.
JSTaggedValue JSThread::GetCurrentLexenv() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetEnv();
}
213
// Returns the function object of the topmost interpreted frame.
JSTaggedValue JSThread::GetCurrentFunction() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetFunction();
}
219
GetCurrentFrame() const220 const JSTaggedType *JSThread::GetCurrentFrame() const
221 {
222 if (IsAsmInterpreter()) {
223 return GetLastLeaveFrame();
224 }
225 return GetCurrentSPFrame();
226 }
227
SetCurrentFrame(JSTaggedType * sp)228 void JSThread::SetCurrentFrame(JSTaggedType *sp)
229 {
230 if (IsAsmInterpreter()) {
231 return SetLastLeaveFrame(sp);
232 }
233 return SetCurrentSPFrame(sp);
234 }
235
GetCurrentInterpretedFrame() const236 const JSTaggedType *JSThread::GetCurrentInterpretedFrame() const
237 {
238 if (IsAsmInterpreter()) {
239 auto frameHandler = FrameHandler(this);
240 return frameHandler.GetSp();
241 }
242 return GetCurrentSPFrame();
243 }
244
InvokeWeakNodeFreeGlobalCallBack()245 void JSThread::InvokeWeakNodeFreeGlobalCallBack()
246 {
247 while (!weakNodeFreeGlobalCallbacks_.empty()) {
248 auto callbackPair = weakNodeFreeGlobalCallbacks_.back();
249 weakNodeFreeGlobalCallbacks_.pop_back();
250 ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
251 auto callback = callbackPair.first;
252 (*callback)(callbackPair.second);
253 }
254 }
255
InvokeSharedNativePointerCallbacks()256 void JSThread::InvokeSharedNativePointerCallbacks()
257 {
258 auto &callbacks = vm_->GetSharedNativePointerCallbacks();
259 while (!callbacks.empty()) {
260 auto callbackPair = callbacks.back();
261 callbacks.pop_back();
262 ASSERT(callbackPair.first != nullptr && callbackPair.second.first != nullptr &&
263 callbackPair.second.second != nullptr);
264 auto callback = callbackPair.first;
265 (*callback)(env_, callbackPair.second.first, callbackPair.second.second);
266 }
267 }
268
// Runs all queued native-finalize callbacks for dead weak nodes, then the
// optional finalize-task callback. Guarded against reentrancy because a
// callback may itself trigger another GC that re-enters this function.
void JSThread::InvokeWeakNodeNativeFinalizeCallback()
{
    // the second callback may lead to another GC, if this, return directly;
    if (runningNativeFinalizeCallbacks_) {
        return;
    }
    runningNativeFinalizeCallbacks_ = true;
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "InvokeNativeFinalizeCallbacks num:"
        + std::to_string(weakNodeNativeFinalizeCallbacks_.size()));
    while (!weakNodeNativeFinalizeCallbacks_.empty()) {
        auto callbackPair = weakNodeNativeFinalizeCallbacks_.back();
        weakNodeNativeFinalizeCallbacks_.pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        (*callback)(callbackPair.second);
    }
    if (finalizeTaskCallback_ != nullptr) {
        finalizeTaskCallback_();
    }
    runningNativeFinalizeCallbacks_ = false;
}
290
// Forwards the "global leak check started" option from the VM's JS options.
bool JSThread::IsStartGlobalLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().IsStartGlobalLeakCheck();
}
295
// Forwards the global-object leak-check option from the VM's JS options.
bool JSThread::EnableGlobalObjectLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalObjectLeakCheck();
}
300
// Forwards the global-primitive leak-check option from the VM's JS options.
bool JSThread::EnableGlobalPrimitiveLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalPrimitiveLeakCheck();
}
305
// True when the thread is RUNNING, or when a heap/cpu profiler session is
// active (each profiler check is compiled in only when its feature is enabled).
bool JSThread::IsInRunningStateOrProfiling() const
{
    bool result = IsInRunningState();
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    result |= vm_->GetHeapProfile() != nullptr;
#endif
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    result |= GetIsProfiling();
#endif
    return result;
}
317
WriteToStackTraceFd(std::ostringstream & buffer) const318 void JSThread::WriteToStackTraceFd(std::ostringstream &buffer) const
319 {
320 if (stackTraceFd_ < 0) {
321 return;
322 }
323 buffer << std::endl;
324 DPrintf(reinterpret_cast<fd_t>(stackTraceFd_), buffer.str());
325 buffer.str("");
326 }
327
// Records the fd used by WriteToStackTraceFd for leak-check reports.
void JSThread::SetStackTraceFd(int32_t fd)
{
    stackTraceFd_ = fd;
}
332
CloseStackTraceFd()333 void JSThread::CloseStackTraceFd()
334 {
335 if (stackTraceFd_ != -1) {
336 FSync(reinterpret_cast<fd_t>(stackTraceFd_));
337 Close(reinterpret_cast<fd_t>(stackTraceFd_));
338 stackTraceFd_ = -1;
339 }
340 }
341
SetJitCodeMap(JSTaggedType exception,MachineCode * machineCode,std::string & methodName,uintptr_t offset)342 void JSThread::SetJitCodeMap(JSTaggedType exception, MachineCode* machineCode, std::string &methodName,
343 uintptr_t offset)
344 {
345 auto it = jitCodeMaps_.find(exception);
346 if (it != jitCodeMaps_.end()) {
347 it->second->push_back(std::make_tuple(machineCode, methodName, offset));
348 } else {
349 JitCodeVector *jitCode = new JitCodeVector {std::make_tuple(machineCode, methodName, offset)};
350 jitCodeMaps_.emplace(exception, jitCode);
351 }
352 }
353
// GC root iteration for this thread: visits the pending exception, the builtin
// entries, every context's stack and context roots (temporarily switching the
// current context for each), and finally the global handle storage.
void JSThread::Iterate(const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor,
    const RootBaseAndDerivedVisitor &derivedVisitor)
{
    if (!glueData_.exception_.IsHole()) {
        visitor(Root::ROOT_VM, ObjectSlot(ToUintPtr(&glueData_.exception_)));
    }
    rangeVisitor(
        Root::ROOT_VM, ObjectSlot(glueData_.builtinEntries_.Begin()), ObjectSlot(glueData_.builtinEntries_.End()));

    // Remember the active context so it can be restored after the loop.
    EcmaContext *tempContext = glueData_.currentContext_;
    for (EcmaContext *context : contexts_) {
        // visit stack roots
        SwitchCurrentContext(context, true);
        FrameHandler frameHandler(this);
        frameHandler.Iterate(visitor, rangeVisitor, derivedVisitor);
        context->Iterate(visitor, rangeVisitor);
    }
    SwitchCurrentContext(tempContext, true);
    // visit tagged handle storage roots
    if (vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        IterateHandleWithCheck(visitor, rangeVisitor);
    } else {
        size_t globalCount = 0;
        globalStorage_->IterateUsageGlobal([visitor, &globalCount](Node *node) {
            JSTaggedValue value(node->GetObject());
            if (value.IsHeapObject()) {
                visitor(ecmascript::Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            }
            globalCount++;
        });
        // Warn once per process when the global handle count crosses the limit.
        static bool hasCheckedGlobalCount = false;
        static const size_t WARN_GLOBAL_COUNT = 100000;
        if (!hasCheckedGlobalCount && globalCount >= WARN_GLOBAL_COUNT) {
            LOG_ECMA(WARN) << "Global reference count is " << globalCount << ",It exceed the upper limit 100000!";
            hasCheckedGlobalCount = true;
        }
    }
}
// Hands the whole exception -> JIT code map to the supplied visitor.
void JSThread::IterateJitCodeMap(const JitCodeMapVisitor &jitCodeMapVisitor)
{
    jitCodeMapVisitor(jitCodeMaps_);
}
396
// Leak-check variant of handle-root iteration: visits per-context handles and
// the debug global storage, counting globals per JSType and emitting
// "maybe leak" reports to the stack-trace fd when a stop-leak-check pass runs.
void JSThread::IterateHandleWithCheck(const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor)
{
    size_t handleCount = 0;
    for (EcmaContext *context : contexts_) {
        handleCount += context->IterateHandle(rangeVisitor);
    }

    size_t globalCount = 0;
    static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
    int typeCount[JS_TYPE_LAST] = { 0 };
    int primitiveCount = 0;
    // "Stop" means the check was enabled, has now been switched off, and a
    // report fd is available — i.e. it is time to dump the results.
    bool isStopObjectLeakCheck = EnableGlobalObjectLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    bool isStopPrimitiveLeakCheck = EnableGlobalPrimitiveLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    std::ostringstream buffer;
    globalDebugStorage_->IterateUsageGlobal([this, visitor, &globalCount, &typeCount, &primitiveCount,
        isStopObjectLeakCheck, isStopPrimitiveLeakCheck, &buffer](DebugNode *node) {
        node->MarkCount();
        JSTaggedValue value(node->GetObject());
        if (value.IsHeapObject()) {
            visitor(ecmascript::Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            TaggedObject *object = value.GetTaggedObject();
            // Follow a forwarding address if the object was moved by GC.
            MarkWord word(value.GetTaggedObject());
            if (word.IsForwardingAddress()) {
                object = word.ToForwardingAddress();
            }
            typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;

            // Print global information about possible memory leaks.
            // You can print the global new stack within the range of the leaked global number.
            if (isStopObjectLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak object address:" << std::hex << object <<
                    ", type:" << JSHClass::DumpJSType(JSType(object->GetClass()->GetObjectType())) <<
                    ", node address:" << node << ", number:" << std::dec << node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        } else {
            primitiveCount++;
            if (isStopPrimitiveLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak primitive:" << std::hex << value.GetRawData() <<
                    ", node address:" << node << ", number:" << std::dec << node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        }
        globalCount++;
    });

    if (isStopObjectLeakCheck || isStopPrimitiveLeakCheck) {
        buffer << "Global leak check success!";
        WriteToStackTraceFd(buffer);
        CloseStackTraceFd();
    }
    // Determine whether memory leakage by checking handle and global count.
    LOG_ECMA(INFO) << "Iterate root handle count:" << handleCount << ", global handle count:" << globalCount;
    OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type Primitive count:" << primitiveCount;
    // Print global object type statistic.
    static const int MIN_COUNT_THRESHOLD = 50;
    for (int i = 0; i < JS_TYPE_LAST; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type " << JSHClass::DumpJSType(JSType(i))
                                            << " count:" << typeCount[i];
        }
    }
}
462
// Weak-reference sweep over the global storage: for each weak node the visitor
// returns nullptr (dead — clear the node and schedule/run callbacks), the same
// pointer (alive), or a new pointer (moved — update the node).
void JSThread::IterateWeakEcmaGlobalStorage(const WeakRootVisitor &visitor, GCKind gcKind)
{
    auto callBack = [this, visitor, gcKind](WeakNode *node) {
        JSTaggedValue value(node->GetObject());
        if (!value.IsHeapObject()) {
            return;
        }
        auto object = value.GetTaggedObject();
        auto fwd = visitor(object);
        if (fwd == nullptr) {
            // undefined
            node->SetObject(JSTaggedValue::Undefined().GetRawData());
            auto nativeFinalizeCallback = node->GetNativeFinalizeCallback();
            if (nativeFinalizeCallback) {
                weakNodeNativeFinalizeCallbacks_.push_back(std::make_pair(nativeFinalizeCallback,
                                                                          node->GetReference()));
            }
            auto freeGlobalCallBack = node->GetFreeGlobalCallback();
            if (!freeGlobalCallBack) {
                // If no callback, dispose global immediately
                DisposeGlobalHandle(ToUintPtr(node));
            } else if (gcKind == GCKind::SHARED_GC) {
                // For shared GC, free global should defer execute in its own thread
                weakNodeFreeGlobalCallbacks_.push_back(std::make_pair(freeGlobalCallBack, node->GetReference()));
            } else {
                node->CallFreeGlobalCallback();
            }
        } else if (fwd != object) {
            // update
            node->SetObject(JSTaggedValue(fwd).GetRawData());
        }
    };
    // The debug storage is in use when global leak check is enabled (see ctor).
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_->IterateWeakUsageGlobal(callBack);
    } else {
        globalDebugStorage_->IterateWeakUsageGlobal(callBack);
    }
}
501
// Sweeps jitCodeMaps_ after GC: drops entries whose exception object died,
// and rekeys entries whose exception object was moved.
void JSThread::UpdateJitCodeMapReference(const WeakRootVisitor &visitor)
{
    auto it = jitCodeMaps_.begin();
    while (it != jitCodeMaps_.end()) {
        auto obj = reinterpret_cast<TaggedObject *>(it->first);
        auto fwd = visitor(obj);
        if (fwd == nullptr) {
            // Exception object is dead: free the owned vector and remove.
            delete it->second;
            it = jitCodeMaps_.erase(it);
        } else if (fwd != obj) {
            // Object moved: re-insert under the forwarded address.
            // NOTE(review): if the forwarded key already exists, emplace is a
            // no-op and it->second would be leaked by the erase below —
            // confirm forwarding cannot collide with an existing entry.
            jitCodeMaps_.emplace(JSTaggedValue(fwd).GetRawData(), it->second);
            it = jitCodeMaps_.erase(it);
        } else {
            ++it;
        }
    }
}
519
// Checks whether sp has dipped into the reserved tail of the interpreter frame
// area; on overflow sets a RangeError (unless one is already pending) and
// returns true. Skipped when cross-thread execution is enabled.
bool JSThread::DoStackOverflowCheck(const JSTaggedType *sp)
{
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && sp <= glueData_.frameBase_ + RESERVE_STACK_SIZE)) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! Remaining stack size is: " << (sp - glueData_.frameBase_);
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}
536
// Native-stack counterpart of DoStackOverflowCheck: compares the current
// native stack position against the configured limit and raises a RangeError
// on overflow. Skipped when cross-thread execution is enabled.
bool JSThread::DoStackLimitCheck()
{
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && GetCurrentStackPosition() < GetStackLimit())) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! current:" << GetCurrentStackPosition() << " limit:" << GetStackLimit();
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}
552
// Delegates handle-storage growth to the current EcmaContext.
uintptr_t *JSThread::ExpandHandleStorage()
{
    return GetCurrentEcmaContext()->ExpandHandleStorage();
}
557
// Delegates handle-storage shrink (back to prevIndex) to the current EcmaContext.
void JSThread::ShrinkHandleStorage(int prevIndex)
{
    GetCurrentEcmaContext()->ShrinkHandleStorage(prevIndex);
}
562
NotifyStableArrayElementsGuardians(JSHandle<JSObject> receiver,StableArrayChangeKind changeKind)563 void JSThread::NotifyStableArrayElementsGuardians(JSHandle<JSObject> receiver, StableArrayChangeKind changeKind)
564 {
565 if (!glueData_.stableArrayElementsGuardians_) {
566 return;
567 }
568 if (!receiver->GetJSHClass()->IsPrototype() && !receiver->IsJSArray()) {
569 return;
570 }
571 auto env = GetEcmaVM()->GetGlobalEnv();
572 if (receiver.GetTaggedValue() == env->GetObjectFunctionPrototype().GetTaggedValue() ||
573 receiver.GetTaggedValue() == env->GetArrayPrototype().GetTaggedValue()) {
574 glueData_.stableArrayElementsGuardians_ = false;
575 return;
576 }
577 if (changeKind == StableArrayChangeKind::PROTO && receiver->IsJSArray()) {
578 glueData_.stableArrayElementsGuardians_ = false;
579 }
580 }
581
// Re-arms the stable-array-elements guardian (e.g. after context setup).
void JSThread::ResetGuardians()
{
    glueData_.stableArrayElementsGuardians_ = true;
}
586
// Records the initial hidden classes (builtin/instance/prototype/
// prototype-of-prototype/extra) for one builtin type in the glue-data table.
void JSThread::SetInitialBuiltinHClass(
    BuiltinTypeId type, JSHClass *builtinHClass, JSHClass *instanceHClass,
    JSHClass *prototypeHClass, JSHClass *prototypeOfPrototypeHClass, JSHClass *extraHClass)
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    auto &entry = glueData_.builtinHClassEntries_.entries[index];
    LOG_ECMA(DEBUG) << "JSThread::SetInitialBuiltinHClass: "
                    << "Builtin = " << ToString(type)
                    << ", builtinHClass = " << builtinHClass
                    << ", instanceHClass = " << instanceHClass
                    << ", prototypeHClass = " << prototypeHClass
                    << ", prototypeOfPrototypeHClass = " << prototypeOfPrototypeHClass
                    << ", extraHClass = " << extraHClass;
    entry.builtinHClass = builtinHClass;
    entry.instanceHClass = instanceHClass;
    entry.prototypeHClass = prototypeHClass;
    entry.prototypeOfPrototypeHClass = prototypeOfPrototypeHClass;
    entry.extraHClass = extraHClass;
}
606
SetInitialBuiltinGlobalHClass(JSHClass * builtinHClass,GlobalIndex globalIndex)607 void JSThread::SetInitialBuiltinGlobalHClass(
608 JSHClass *builtinHClass, GlobalIndex globalIndex)
609 {
610 auto &map = ctorHclassEntries_;
611 map[builtinHClass] = globalIndex;
612 }
613
GetBuiltinHClass(BuiltinTypeId type) const614 JSHClass *JSThread::GetBuiltinHClass(BuiltinTypeId type) const
615 {
616 size_t index = BuiltinHClassEntries::GetEntryIndex(type);
617 return glueData_.builtinHClassEntries_.entries[index].builtinHClass;
618 }
619
GetBuiltinInstanceHClass(BuiltinTypeId type) const620 JSHClass *JSThread::GetBuiltinInstanceHClass(BuiltinTypeId type) const
621 {
622 size_t index = BuiltinHClassEntries::GetEntryIndex(type);
623 return glueData_.builtinHClassEntries_.entries[index].instanceHClass;
624 }
625
GetBuiltinExtraHClass(BuiltinTypeId type) const626 JSHClass *JSThread::GetBuiltinExtraHClass(BuiltinTypeId type) const
627 {
628 size_t index = BuiltinHClassEntries::GetEntryIndex(type);
629 return glueData_.builtinHClassEntries_.entries[index].extraHClass;
630 }
631
GetArrayInstanceHClass(ElementsKind kind,bool isPrototype) const632 JSHClass *JSThread::GetArrayInstanceHClass(ElementsKind kind, bool isPrototype) const
633 {
634 auto iter = GetArrayHClassIndexMap().find(kind);
635 ASSERT(iter != GetArrayHClassIndexMap().end());
636 auto index = isPrototype ? static_cast<size_t>(iter->second.second) : static_cast<size_t>(iter->second.first);
637 auto exceptArrayHClass = GlobalConstants()->GetGlobalConstantObject(index);
638 auto exceptRecvHClass = JSHClass::Cast(exceptArrayHClass.GetTaggedObject());
639 ASSERT(exceptRecvHClass->IsJSArray());
640 return exceptRecvHClass;
641 }
642
GetBuiltinPrototypeHClass(BuiltinTypeId type) const643 JSHClass *JSThread::GetBuiltinPrototypeHClass(BuiltinTypeId type) const
644 {
645 size_t index = BuiltinHClassEntries::GetEntryIndex(type);
646 return glueData_.builtinHClassEntries_.entries[index].prototypeHClass;
647 }
648
GetBuiltinPrototypeOfPrototypeHClass(BuiltinTypeId type) const649 JSHClass *JSThread::GetBuiltinPrototypeOfPrototypeHClass(BuiltinTypeId type) const
650 {
651 size_t index = BuiltinHClassEntries::GetEntryIndex(type);
652 return glueData_.builtinHClassEntries_.entries[index].prototypeOfPrototypeHClass;
653 }
654
// Byte offset of a builtin hclass entry inside JSThread, used by stub codegen.
size_t JSThread::GetBuiltinHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinHClassOffset(type, isArch32);
}
659
// Byte offset of a builtin prototype hclass entry inside JSThread, used by stub codegen.
size_t JSThread::GetBuiltinPrototypeHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinPrototypeHClassOffset(type, isArch32);
}
664
CheckSwitchDebuggerBCStub()665 void JSThread::CheckSwitchDebuggerBCStub()
666 {
667 auto isDebug = GetEcmaVM()->GetJsDebuggerManager()->IsDebugMode();
668 if (LIKELY(!isDebug)) {
669 if (glueData_.bcStubEntries_.Get(0) == glueData_.bcStubEntries_.Get(1)) {
670 for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
671 auto stubEntry = glueData_.bcDebuggerStubEntries_.Get(i);
672 auto debuggerStubEbtry = glueData_.bcStubEntries_.Get(i);
673 glueData_.bcStubEntries_.Set(i, stubEntry);
674 glueData_.bcDebuggerStubEntries_.Set(i, debuggerStubEbtry);
675 }
676 }
677 } else {
678 if (glueData_.bcDebuggerStubEntries_.Get(0) == glueData_.bcDebuggerStubEntries_.Get(1)) {
679 for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
680 auto stubEntry = glueData_.bcStubEntries_.Get(i);
681 auto debuggerStubEbtry = glueData_.bcDebuggerStubEntries_.Get(i);
682 glueData_.bcDebuggerStubEntries_.Set(i, stubEntry);
683 glueData_.bcStubEntries_.Set(i, debuggerStubEbtry);
684 }
685 }
686 }
687 }
688
// Swaps the normal and PGO-profiling bytecode stub entries so the active set
// matches the PGO-profiler option; the swap is symmetric, so running it again
// restores the original entries.
void JSThread::CheckOrSwitchPGOStubs()
{
    bool isSwitch = false;
    if (IsPGOProfilerEnable()) {
        if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
            SetBCStubStatus(BCStubStatus::PROFILE_BC_STUB);
            isSwitch = true;
        }
    } else {
        if (GetBCStubStatus() == BCStubStatus::PROFILE_BC_STUB) {
            SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
            isSwitch = true;
        }
    }
    if (isSwitch) {
        Address curAddress;
// Swap each (fromName, toName) stub-entry pair in the handler table.
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                      \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                                   \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}
713
// Enables profiling stubs for JIT: with PGO it reuses the PGO stub switch,
// otherwise it swaps in the JIT-profile stub entries (only from NORMAL state).
void JSThread::SwitchJitProfileStubs(bool isEnablePgo)
{
    if (isEnablePgo) {
        SetPGOProfilerEnable(true);
        CheckOrSwitchPGOStubs();
        return;
    }
    bool isSwitch = false;
    if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
        SetBCStubStatus(BCStubStatus::JIT_PROFILE_BC_STUB);
        isSwitch = true;
    }
    if (isSwitch) {
        Address curAddress;
// Swap each (fromName, toName) stub-entry pair in the handler table.
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                      \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                                   \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_JIT_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}
736
// Aborts JS execution by installing a TerminationError as the pending exception.
void JSThread::TerminateExecution()
{
    // set the TERMINATE_ERROR to exception
    ObjectFactory *factory = GetEcmaVM()->GetFactory();
    JSHandle<JSObject> error = factory->GetJSError(ErrorType::TERMINATION_ERROR,
                                                   "Terminate execution!", StackCheck::NO);
    SetException(error.GetTaggedValue());
}
745
// If the ACTIVE_BARRIER flag is set on this thread, notifies the pending
// suspend-all barrier (see PassSuspendBarrier).
void JSThread::CheckAndPassActiveBarrier()
{
    ThreadStateAndFlags oldStateAndFlags;
    oldStateAndFlags.asInt = glueData_.stateAndFlags_.asInt;
    if ((oldStateAndFlags.asStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
        PassSuspendBarrier();
    }
}
754
// Signals the installed suspend barrier (if any) under suspendLock_, clears
// the barrier pointer and the ACTIVE_BARRIER flag. Returns true when a
// barrier was actually passed.
bool JSThread::PassSuspendBarrier()
{
    // Use suspendLock_ to avoid data-race between suspend-all-thread and suspended-threads.
    LockHolder lock(suspendLock_);
    if (suspendBarrier_ != nullptr) {
        suspendBarrier_->PassStrongly();
        suspendBarrier_ = nullptr;
        ClearFlag(ThreadFlag::ACTIVE_BARRIER);
        return true;
    }
    return false;
}
767
// True when a finished concurrent mark should be completed at the next
// safepoint: marking done, concurrent mark actually ran, and no serialize /
// sensitive / startup condition blocks collection.
bool JSThread::ShouldHandleMarkingFinishedInSafepoint()
{
    auto heap = const_cast<Heap *>(GetEcmaVM()->GetHeap());
    return IsMarkFinished() && heap->GetConcurrentMarker()->IsTriggeredConcurrentMark() &&
           !heap->GetOnSerializeEvent() && !heap->InSensitiveStatus() && !heap->CheckIfNeedStopCollectionByStartup();
}
774
// Safepoint poll: services termination, suspension, VM-suspend and JIT
// install requests, then optional profiling/forced-GC hooks, and finally
// completes a finished concurrent mark. Returns true when a GC was triggered.
bool JSThread::CheckSafepoint()
{
    ResetCheckSafePointStatus();

    if (HasTerminationRequest()) {
        TerminateExecution();
        SetVMTerminated(true);
        SetTerminationRequest(false);
    }

    if (HasSuspendRequest()) {
        WaitSuspension();
    }

    // vmThreadControl_ 's thread_ is current JSThread's this.
    if (VMNeedSuspension()) {
        vmThreadControl_->SuspendVM();
    }
    if (HasInstallMachineCode()) {
        vm_->GetJit()->InstallTasks(this);
        SetInstallMachineCode(false);
    }

#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    // Deferred profiler start requested from another thread.
    if (needProfiling_.load() && !isProfiling_) {
        DFXJSNApi::StartCpuProfilerForFile(vm_, profileName_, CpuProfiler::INTERVAL_OF_INNER_START);
        SetNeedProfiling(false);
    }
#endif // ECMASCRIPT_SUPPORT_CPUPROFILER
    bool gcTriggered = false;
#ifndef NDEBUG
    if (vm_->GetJSOptions().EnableForceGC()) {
        GetEcmaVM()->CollectGarbage(TriggerGCType::FULL_GC);
        gcTriggered = true;
    }
#endif
    auto heap = const_cast<Heap *>(GetEcmaVM()->GetHeap());
    // Handle exit app senstive scene
    heap->HandleExitHighSensitiveEvent();

    // Do not trigger local gc during the shared gc processRset process.
    if (IsProcessingLocalToSharedRset()) {
        return false;
    }
    // After concurrent mark finish, should trigger gc here to avoid create much floating garbage
    // except in serialize or high sensitive event
    if (ShouldHandleMarkingFinishedInSafepoint()) {
        heap->SetCanThrowOOMError(false);
        heap->GetConcurrentMarker()->HandleMarkingFinished();
        heap->SetCanThrowOOMError(true);
        gcTriggered = true;
    }
    return gcTriggered;
}
829
CheckJSTaggedType(JSTaggedType value) const830 void JSThread::CheckJSTaggedType(JSTaggedType value) const
831 {
832 if (JSTaggedValue(value).IsHeapObject() &&
833 !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
834 LOG_FULL(FATAL) << "value:" << value << " is invalid!";
835 }
836 }
837
CpuProfilerCheckJSTaggedType(JSTaggedType value) const838 bool JSThread::CpuProfilerCheckJSTaggedType(JSTaggedType value) const
839 {
840 if (JSTaggedValue(value).IsHeapObject() &&
841 !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
842 return false;
843 }
844 return true;
845 }
846
847 // static
GetAsmStackLimit()848 size_t JSThread::GetAsmStackLimit()
849 {
850 #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
851 // js stack limit
852 size_t result = GetCurrentStackPosition() - EcmaParamConfiguration::GetDefalutStackSize();
853 int ret = -1;
854 void *stackAddr = nullptr;
855 size_t size = 0;
856 #if defined(ENABLE_FFRT_INTERFACES)
857 if (!ffrt_get_current_coroutine_stack(&stackAddr, &size)) {
858 pthread_attr_t attr;
859 ret = pthread_getattr_np(pthread_self(), &attr);
860 if (ret != 0) {
861 LOG_ECMA(ERROR) << "Get current thread attr failed";
862 return result;
863 }
864 ret = pthread_attr_getstack(&attr, &stackAddr, &size);
865 if (pthread_attr_destroy(&attr) != 0) {
866 LOG_ECMA(ERROR) << "Destroy current thread attr failed";
867 }
868 if (ret != 0) {
869 LOG_ECMA(ERROR) << "Get current thread stack size failed";
870 return result;
871 }
872 }
873 #else
874 pthread_attr_t attr;
875 ret = pthread_getattr_np(pthread_self(), &attr);
876 if (ret != 0) {
877 LOG_ECMA(ERROR) << "Get current thread attr failed";
878 return result;
879 }
880 ret = pthread_attr_getstack(&attr, &stackAddr, &size);
881 if (pthread_attr_destroy(&attr) != 0) {
882 LOG_ECMA(ERROR) << "Destroy current thread attr failed";
883 }
884 if (ret != 0) {
885 LOG_ECMA(ERROR) << "Get current thread stack size failed";
886 return result;
887 }
888 #endif
889
890 bool isMainThread = IsMainThread();
891 uintptr_t threadStackLimit = reinterpret_cast<uintptr_t>(stackAddr);
892 uintptr_t threadStackStart = threadStackLimit + size;
893 if (isMainThread) {
894 struct rlimit rl;
895 ret = getrlimit(RLIMIT_STACK, &rl);
896 if (ret != 0) {
897 LOG_ECMA(ERROR) << "Get current thread stack size failed";
898 return result;
899 }
900 if (rl.rlim_cur > DEFAULT_MAX_SYSTEM_STACK_SIZE) {
901 LOG_ECMA(ERROR) << "Get current thread stack size exceed " << DEFAULT_MAX_SYSTEM_STACK_SIZE
902 << " : " << rl.rlim_cur;
903 return result;
904 }
905 threadStackLimit = threadStackStart - rl.rlim_cur;
906 }
907
908 if (result < threadStackLimit) {
909 result = threadStackLimit;
910 }
911
912 LOG_INTERPRETER(DEBUG) << "Current thread stack start: " << reinterpret_cast<void *>(threadStackStart);
913 LOG_INTERPRETER(DEBUG) << "Used stack before js stack start: "
914 << reinterpret_cast<void *>(threadStackStart - GetCurrentStackPosition());
915 LOG_INTERPRETER(DEBUG) << "Current thread asm stack limit: " << reinterpret_cast<void *>(result);
916
917 // To avoid too much times of stack overflow checking, we only check stack overflow before push vregs or
918 // parameters of variable length. So we need a reserved size of stack to make sure stack won't be overflowed
919 // when push other data.
920 result += EcmaParamConfiguration::GetDefaultReservedStackSize();
921 if (threadStackStart <= result) {
922 LOG_FULL(FATAL) << "Too small stackSize to run jsvm";
923 }
924 return result;
925 #else
926 return 0;
927 #endif
928 }
929
IsLegalAsmSp(uintptr_t sp) const930 bool JSThread::IsLegalAsmSp(uintptr_t sp) const
931 {
932 uint64_t bottom = GetStackLimit() - EcmaParamConfiguration::GetDefaultReservedStackSize();
933 uint64_t top = GetStackStart() + EcmaParamConfiguration::GetAllowedUpperStackDiff();
934 return (bottom <= sp && sp <= top);
935 }
936
IsLegalThreadSp(uintptr_t sp) const937 bool JSThread::IsLegalThreadSp(uintptr_t sp) const
938 {
939 uintptr_t bottom = reinterpret_cast<uintptr_t>(glueData_.frameBase_);
940 size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
941 uintptr_t top = bottom + maxStackSize;
942 return (bottom <= sp && sp <= top);
943 }
944
IsLegalSp(uintptr_t sp) const945 bool JSThread::IsLegalSp(uintptr_t sp) const
946 {
947 return IsLegalAsmSp(sp) || IsLegalThreadSp(sp);
948 }
949
IsMainThread()950 bool JSThread::IsMainThread()
951 {
952 #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
953 return getpid() == syscall(SYS_gettid);
954 #else
955 return true;
956 #endif
957 }
958
// Registers a newly created context on this thread. The first context adopts
// the thread's existing interpreter stack; every later context receives its
// own freshly allocated frame area and an initialized stack frame.
void JSThread::PushContext(EcmaContext *context)
{
    // Quiesce background heap tasks before mutating the context list.
    const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
    contexts_.emplace_back(context);

    if (!glueData_.currentContext_) {
        // The first context in ecma vm: share the thread's current frame
        // pointers, frame base and stack bounds.
        glueData_.currentContext_ = context;
        context->SetFramePointers(const_cast<JSTaggedType *>(GetCurrentSPFrame()),
                                  const_cast<JSTaggedType *>(GetLastLeaveFrame()),
                                  const_cast<JSTaggedType *>(GetLastFp()));
        context->SetFrameBase(glueData_.frameBase_);
        context->SetStackLimit(glueData_.stackLimit_);
        context->SetStackStart(glueData_.stackStart_);
    } else {
        // align with 16
        size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
        // Allocate a dedicated frame area; the SP starts at the top (frames
        // grow downwards).
        context->SetFrameBase(static_cast<JSTaggedType *>(
            vm_->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize)));
        context->SetFramePointers(context->GetFrameBase() + maxStackSize, nullptr, nullptr);
        context->SetStackLimit(GetAsmStackLimit());
        context->SetStackStart(GetCurrentStackPosition());
        EcmaInterpreter::InitStackFrame(context);
    }
}
984
// Removes the most recently pushed context and makes the previous one current.
// NOTE(review): assumes at least two contexts exist when called — popping the
// last context would leave contexts_ empty and back() undefined; confirm
// callers uphold this invariant.
void JSThread::PopContext()
{
    contexts_.pop_back();
    glueData_.currentContext_ = contexts_.back();
}
990
// Makes currentContext the active context: saves the outgoing context's frame
// pointers, stack bounds and global env into its EcmaContext, then loads the
// same set from the incoming context into glue data. When isInIterate is true
// (called during GC iteration) the global-constants pointer is left untouched.
void JSThread::SwitchCurrentContext(EcmaContext *currentContext, bool isInIterate)
{
    ASSERT(std::count(contexts_.begin(), contexts_.end(), currentContext));

    // Save the outgoing context's execution state.
    glueData_.currentContext_->SetFramePointers(const_cast<JSTaggedType *>(GetCurrentSPFrame()),
                                                const_cast<JSTaggedType *>(GetLastLeaveFrame()),
                                                const_cast<JSTaggedType *>(GetLastFp()));
    glueData_.currentContext_->SetFrameBase(glueData_.frameBase_);
    glueData_.currentContext_->SetStackLimit(GetStackLimit());
    glueData_.currentContext_->SetStackStart(GetStackStart());
    glueData_.currentContext_->SetGlobalEnv(GetGlueGlobalEnv());
    // When the glueData_.currentContext_ is not fully initialized,glueData_.globalObject_ will be hole.
    // Assigning hole to JSGlobalObject could cause a mistake at builtins initalization.
    if (!glueData_.globalObject_.IsHole()) {
        glueData_.currentContext_->GetGlobalEnv()->SetJSGlobalObject(this, glueData_.globalObject_);
    }

    // Load the incoming context's execution state.
    SetCurrentSPFrame(currentContext->GetCurrentFrame());
    SetLastLeaveFrame(currentContext->GetLeaveFrame());
    SetLastFp(currentContext->GetLastFp());
    glueData_.frameBase_ = currentContext->GetFrameBase();
    glueData_.stackLimit_ = currentContext->GetStackLimit();
    glueData_.stackStart_ = currentContext->GetStackStart();
    if (!currentContext->GlobalEnvIsHole()) {
        SetGlueGlobalEnv(*(currentContext->GetGlobalEnv()));
        /**
         * GlobalObject has two copies, one in GlueData and one in Context.GlobalEnv, when switch context, will save
         * GlobalObject in GlueData to CurrentContext.GlobalEnv(is this nessary?), and then switch to new context,
         * save the GlobalObject in NewContext.GlobalEnv to GlueData.
         * The initial value of GlobalObject in Context.GlobalEnv is Undefined, but in GlueData is Hole,
         * so if two SharedGC happened during the builtins initalization like this, maybe will cause incorrect scene:
         *
         * Default:
         * Slot for GlobalObject:   Context.GlobalEnv   GlueData
         * value:                   Undefined           Hole
         *
         * First SharedGC(JSThread::SwitchCurrentContext), Set GlobalObject from Context.GlobalEnv to GlueData:
         * Slot for GlobalObject:   Context.GlobalEnv   GlueData
         * value:                   Undefined           Undefined
         *
         * Builtins Initialize, Create GlobalObject and Set to Context.GlobalEnv:
         * Slot for GlobalObject:   Context.GlobalEnv   GlueData
         * value:                   Obj                 Undefined
         *
         * Second SharedGC(JSThread::SwitchCurrentContext), Set GlobalObject from GlueData to Context.GlobalEnv:
         * Slot for GlobalObject:   Context.GlobalEnv   GlueData
         * value:                   Undefined           Undefined
         *
         * So when copy values between Context.GlobalEnv and GlueData, need to check if the value is Hole in GlueData,
         * and if is Undefined in Context.GlobalEnv, because the initial value is different.
         */
        if (!currentContext->GetGlobalEnv()->GetGlobalObject().IsUndefined()) {
            SetGlobalObject(currentContext->GetGlobalEnv()->GetGlobalObject());
        }
    }
    if (!isInIterate) {
        // If isInIterate is true, it means it is in GC iterate and global variables are no need to change.
        glueData_.globalConst_ = const_cast<GlobalEnvConstants *>(currentContext->GlobalConstants());
    }

    glueData_.currentContext_ = currentContext;
}
1053
EraseContext(EcmaContext * context)1054 bool JSThread::EraseContext(EcmaContext *context)
1055 {
1056 const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
1057 bool isCurrentContext = false;
1058 auto iter = std::find(contexts_.begin(), contexts_.end(), context);
1059 if (*iter == context) {
1060 if (glueData_.currentContext_ == context) {
1061 isCurrentContext = true;
1062 }
1063 contexts_.erase(iter);
1064 if (isCurrentContext) {
1065 SwitchCurrentContext(contexts_.back());
1066 }
1067 return true;
1068 }
1069 return false;
1070 }
1071
ClearContextCachedConstantPool()1072 void JSThread::ClearContextCachedConstantPool()
1073 {
1074 for (EcmaContext *context : contexts_) {
1075 context->ClearCachedConstantPool();
1076 }
1077 }
1078
GetPropertiesCache() const1079 PropertiesCache *JSThread::GetPropertiesCache() const
1080 {
1081 return glueData_.currentContext_->GetPropertiesCache();
1082 }
1083
GetFirstGlobalConst() const1084 const GlobalEnvConstants *JSThread::GetFirstGlobalConst() const
1085 {
1086 return contexts_[0]->GlobalConstants();
1087 }
1088
IsAllContextsInitialized() const1089 bool JSThread::IsAllContextsInitialized() const
1090 {
1091 return contexts_.back()->IsInitialized();
1092 }
1093
IsReadyToUpdateDetector() const1094 bool JSThread::IsReadyToUpdateDetector() const
1095 {
1096 return !GetEnableLazyBuiltins() && IsAllContextsInitialized();
1097 }
1098
GetOrCreateRegExpCache()1099 Area *JSThread::GetOrCreateRegExpCache()
1100 {
1101 if (regExpCache_ == nullptr) {
1102 regExpCache_ = nativeAreaAllocator_->AllocateArea(MAX_REGEXP_CACHE_SIZE);
1103 }
1104 return regExpCache_;
1105 }
1106
// Records the global property box and hclass of one builtin (looked up by its
// global-property key) into the glue-data builtin entry table, enabling fast
// builtin access from compiled code.
void JSThread::InitializeBuiltinObject(const std::string& key)
{
    BuiltinIndex& builtins = BuiltinIndex::GetInstance();
    auto index = builtins.GetBuiltinIndex(key);
    ASSERT(index != BuiltinIndex::NOT_FOUND);
    /*
        If using `auto globalObject = GetEcmaVM()->GetGlobalEnv()->GetGlobalObject()` here,
        it will cause incorrect result in multi-context environment. For example:

        ```ts
        let obj = {};
        print(obj instanceof Object); // instead of true, will print false
        ```
    */
    auto globalObject = contexts_.back()->GetGlobalEnv()->GetGlobalObject();
    auto jsObject = JSHandle<JSObject>(this, globalObject);
    auto box = jsObject->GetGlobalPropertyBox(this, key);
    if (box == nullptr) {
        // The builtin is not present on the global object; leave its entry empty.
        return;
    }
    auto& entry = glueData_.builtinEntries_.builtin_[index];
    entry.box_ = JSTaggedValue::Cast(box);
    auto builtin = JSHandle<JSObject>(this, box->GetValue());
    // Cache the hclass as well so compiled code can guard against redefinition.
    auto hclass = builtin->GetJSHClass();
    entry.hClass_ = JSTaggedValue::Cast(hclass);
}
1133
InitializeBuiltinObject()1134 void JSThread::InitializeBuiltinObject()
1135 {
1136 BuiltinIndex& builtins = BuiltinIndex::GetInstance();
1137 for (auto key: builtins.GetBuiltinKeys()) {
1138 InitializeBuiltinObject(key);
1139 }
1140 }
1141
IsPropertyCacheCleared() const1142 bool JSThread::IsPropertyCacheCleared() const
1143 {
1144 for (EcmaContext *context : contexts_) {
1145 if (!context->GetPropertiesCache()->IsCleared()) {
1146 return false;
1147 }
1148 }
1149 return true;
1150 }
1151
UpdateState(ThreadState newState)1152 void JSThread::UpdateState(ThreadState newState)
1153 {
1154 ThreadState oldState = GetState();
1155 if (oldState == ThreadState::RUNNING && newState != ThreadState::RUNNING) {
1156 TransferFromRunningToSuspended(newState);
1157 } else if (oldState != ThreadState::RUNNING && newState == ThreadState::RUNNING) {
1158 TransferToRunning();
1159 } else {
1160 // Here can be some extra checks...
1161 StoreState(newState);
1162 }
1163 }
1164
SuspendThread(bool internalSuspend,SuspendBarrier * barrier)1165 void JSThread::SuspendThread(bool internalSuspend, SuspendBarrier* barrier)
1166 {
1167 LockHolder lock(suspendLock_);
1168 if (!internalSuspend) {
1169 // do smth here if we want to combine internal and external suspension
1170 }
1171
1172 uint32_t old_count = suspendCount_++;
1173 if (old_count == 0) {
1174 SetFlag(ThreadFlag::SUSPEND_REQUEST);
1175 SetCheckSafePointStatus();
1176 }
1177
1178 if (barrier != nullptr) {
1179 ASSERT(suspendBarrier_ == nullptr);
1180 suspendBarrier_ = barrier;
1181 SetFlag(ThreadFlag::ACTIVE_BARRIER);
1182 SetCheckSafePointStatus();
1183 }
1184 }
1185
ResumeThread(bool internalSuspend)1186 void JSThread::ResumeThread(bool internalSuspend)
1187 {
1188 LockHolder lock(suspendLock_);
1189 if (!internalSuspend) {
1190 // do smth here if we want to combine internal and external suspension
1191 }
1192 if (suspendCount_ > 0) {
1193 suspendCount_--;
1194 if (suspendCount_ == 0) {
1195 ClearFlag(ThreadFlag::SUSPEND_REQUEST);
1196 ResetCheckSafePointStatus();
1197 }
1198 }
1199 suspendCondVar_.Signal();
1200 }
1201
// Parks the current thread until all suspension requests have been resumed.
// The thread is moved to IS_SUSPENDED for the duration and restored to its
// previous state afterwards.
void JSThread::WaitSuspension()
{
    // Wait slice passed to TimedWait; the loop re-checks suspendCount_ on
    // every wakeup, so the exact value only bounds wakeup latency.
    constexpr int TIMEOUT = 100;
    ThreadState oldState = GetState();
    UpdateState(ThreadState::IS_SUSPENDED);
    {
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitSuspension");
        LockHolder lock(suspendLock_);
        while (suspendCount_ > 0) {
            suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            // we need to do smth if Runtime is terminating at this point
        }
        ASSERT(!HasSuspendRequest());
    }
    UpdateState(oldState);
}
1218
// Enters managed (JS) code: the thread must currently be outside managed
// state, and becomes RUNNING.
void JSThread::ManagedCodeBegin()
{
    ASSERT(!IsInManagedState());
    UpdateState(ThreadState::RUNNING);
}
1224
// Leaves managed (JS) code: the thread must currently be RUNNING, and
// transitions to NATIVE.
void JSThread::ManagedCodeEnd()
{
    ASSERT(IsInManagedState());
    UpdateState(ThreadState::NATIVE);
}
1230
// Leaves RUNNING state: stores the new (non-RUNNING) state, then passes any
// active suspend barrier so the suspending thread can make progress.
void JSThread::TransferFromRunningToSuspended(ThreadState newState)
{
    ASSERT(currentThread == this);
    StoreSuspendedState(newState);
    CheckAndPassActiveBarrier();
}
1237
// Switches this (non-daemon) thread into RUNNING state, then performs work
// deferred until the thread is running again: freeing weak global nodes,
// invoking shared native-pointer callbacks, and retrying a pending full-mark
// request.
void JSThread::TransferToRunning()
{
    ASSERT(!IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
    // Invoke free weak global callback when thread switch to running
    if (!weakNodeFreeGlobalCallbacks_.empty()) {
        InvokeWeakNodeFreeGlobalCallBack();
    }
    if (!vm_->GetSharedNativePointerCallbacks().empty()) {
        InvokeSharedNativePointerCallbacks();
    }
    if (fullMarkRequest_) {
        // NOTE(review): the return value presumably indicates whether the
        // request must stay pending for a later attempt — confirm against
        // TryTriggerFullMarkBySharedLimit's contract.
        fullMarkRequest_ = const_cast<Heap*>(vm_->GetHeap())->TryTriggerFullMarkBySharedLimit();
    }
}
1254
// Daemon-thread variant of TransferToRunning: only performs the state switch,
// with none of the deferred callback work.
void JSThread::TransferDaemonThreadToRunning()
{
    ASSERT(IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
}
1261
// Atomically replaces the thread-state field while preserving whatever flags
// other threads may have set concurrently; retries the CAS until it succeeds.
inline void JSThread::StoreState(ThreadState newState)
{
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asInt = glueData_.stateAndFlags_.asInt;

        ThreadStateAndFlags newStateAndFlags;
        // Carry the current flags over unchanged; only the state is updated.
        newStateAndFlags.asStruct.flags = oldStateAndFlags.asStruct.flags;
        newStateAndFlags.asStruct.state = newState;

        bool done = glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                               newStateAndFlags.asNonvolatileInt,
                                                                               std::memory_order_release);
        if (LIKELY(done)) {
            break;
        }
    }
}
1280
// Moves the thread into RUNNING state, but only once no flags are pending:
// an active barrier is passed first, and a pending suspend request blocks
// here until resumed. Loops until the flag-free CAS succeeds.
void JSThread::StoreRunningState(ThreadState newState)
{
    ASSERT(newState == ThreadState::RUNNING);
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asInt = glueData_.stateAndFlags_.asInt;
        ASSERT(oldStateAndFlags.asStruct.state != ThreadState::RUNNING);

        if (LIKELY(oldStateAndFlags.asStruct.flags == ThreadFlag::NO_FLAGS)) {
            // No pending flags: try to claim RUNNING atomically.
            ThreadStateAndFlags newStateAndFlags;
            newStateAndFlags.asStruct.flags = oldStateAndFlags.asStruct.flags;
            newStateAndFlags.asStruct.state = newState;

            if (glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                           newStateAndFlags.asNonvolatileInt,
                                                                           std::memory_order_release)) {
                break;
            }
        } else if ((oldStateAndFlags.asStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
            // Acknowledge the suspend barrier before attempting to run.
            PassSuspendBarrier();
        } else if ((oldStateAndFlags.asStruct.flags & ThreadFlag::SUSPEND_REQUEST) != 0) {
            // A suspension is pending: park until every suspender resumes us.
            constexpr int TIMEOUT = 100;
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::StoreRunningState");
            LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            }
            ASSERT(!HasSuspendRequest());
        }
    }
}
1312
// Stores any non-RUNNING state; flags are preserved by the underlying CAS.
inline void JSThread::StoreSuspendedState(ThreadState newState)
{
    ASSERT(newState != ThreadState::RUNNING);
    StoreState(newState);
}
1318
// Called in the child process after fork(): refreshes the cached thread id
// and (re)binds this JSThread to the surviving OS thread.
void JSThread::PostFork()
{
    SetThreadId();
    if (currentThread == nullptr) {
        // This OS thread has no JSThread bound yet: adopt this one.
        currentThread = this;
        ASSERT(GetState() == ThreadState::CREATED);
        UpdateState(ThreadState::NATIVE);
    } else {
        // We tried to call fork in the same thread
        ASSERT(currentThread == this);
        ASSERT(GetState() == ThreadState::NATIVE);
    }
}
1332 #ifndef NDEBUG
// Debug-only: a thread is "in managed state" exactly when its state is
// RUNNING. Must be invoked from the thread itself.
bool JSThread::IsInManagedState() const
{
    ASSERT(this == JSThread::GetCurrent());
    return GetState() == ThreadState::RUNNING;
}
1338
// Debug-only: returns the mutator-lock state recorded for this thread.
MutatorLock::MutatorLockState JSThread::GetMutatorLockState() const
{
    return mutatorLockState_;
}
1343
// Debug-only: records the mutator-lock state for this thread.
void JSThread::SetMutatorLockState(MutatorLock::MutatorLockState newState)
{
    mutatorLockState_ = newState;
}
1348 #endif
1349 } // namespace panda::ecmascript
1350