/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/js_thread.h"

#include "ecmascript/runtime.h"
#include "ecmascript/debugger/js_debugger_manager.h"
#include "ecmascript/js_date.h"
#include "ecmascript/js_object-inl.h"
#include "ecmascript/js_tagged_value.h"
#include "ecmascript/runtime_call_id.h"

#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
#include <sys/resource.h>
#endif

#if defined(ENABLE_EXCEPTION_BACKTRACE)
#include "ecmascript/platform/backtrace.h"
#endif
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
#endif
#include "ecmascript/dfx/vm_thread_control.h"
#include "ecmascript/ecma_global_storage.h"
#include "ecmascript/ic/properties_cache.h"
#include "ecmascript/interpreter/interpreter.h"
#include "ecmascript/mem/concurrent_marker.h"
#include "ecmascript/platform/file.h"
#include "ecmascript/jit/jit.h"

namespace panda::ecmascript {
using CommonStubCSigns = panda::ecmascript::kungfu::CommonStubCSigns;
using BytecodeStubCSigns = panda::ecmascript::kungfu::BytecodeStubCSigns;

thread_local JSThread *currentThread = nullptr;

JSThread *JSThread::GetCurrent()
{
    return currentThread;
}

// static
void JSThread::RegisterThread(JSThread *jsThread)
{
    Runtime::GetInstance()->RegisterThread(jsThread);
    // If currentThread is already set, this JSThread was created for a future fork.
    if (currentThread == nullptr) {
        currentThread = jsThread;
        jsThread->UpdateState(ThreadState::NATIVE);
    }
}

void JSThread::UnregisterThread(JSThread *jsThread)
{
    if (currentThread == jsThread) {
        jsThread->UpdateState(ThreadState::TERMINATED);
        currentThread = nullptr;
    } else {
        // We created this JSThread instance but never forked it.
        ASSERT(jsThread->GetState() == ThreadState::CREATED);
        jsThread->UpdateState(ThreadState::TERMINATED);
    }
    Runtime::GetInstance()->UnregisterThread(jsThread);
}

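// Creates and registers a JSThread for the given VM: allocates the interpreter
// frame area (maxStackSize slots of JSTaggedType), initializes the first stack
// frame, and records the native stack limit and start for overflow checks.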
// static
JSThread *JSThread::Create(EcmaVM *vm)
{
    auto jsThread = new JSThread(vm);

    AsmInterParsedOption asmInterOpt = vm->GetJSOptions().GetAsmInterParsedOption();
    if (asmInterOpt.enableAsm) {
        jsThread->EnableAsmInterpreter();
    }

    jsThread->nativeAreaAllocator_ = vm->GetNativeAreaAllocator();
    jsThread->heapRegionAllocator_ = vm->GetHeapRegionAllocator();
    // align with 16
    size_t maxStackSize = vm->GetEcmaParamConfiguration().GetMaxStackSize();
    jsThread->glueData_.frameBase_ = static_cast<JSTaggedType *>(
        vm->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize));
    jsThread->glueData_.currentFrame_ = jsThread->glueData_.frameBase_ + maxStackSize;
    EcmaInterpreter::InitStackFrame(jsThread);

    jsThread->glueData_.stackLimit_ = GetAsmStackLimit();
    jsThread->glueData_.stackStart_ = GetCurrentStackPosition();
    jsThread->glueData_.isEnableElementsKind_ = vm->IsEnableElementsKind();
    jsThread->SetThreadId();

    RegisterThread(jsThread);
    return jsThread;
}

JSThread::JSThread(EcmaVM *vm) : id_(os::thread::GetCurrentThreadId()), vm_(vm)
{
    auto chunk = vm->GetChunk();
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_ = chunk->New<EcmaGlobalStorage<Node>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                          WeakClearCallback nativeFinalizeCallBack) {
            return globalStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalStorage_->IsWeak(addr); };
    } else {
        globalDebugStorage_ = chunk->New<EcmaGlobalStorage<DebugNode>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalDebugStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalDebugStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                          WeakClearCallback nativeFinalizeCallBack) {
            return globalDebugStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalDebugStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalDebugStorage_->IsWeak(addr); };
    }
    vmThreadControl_ = new VmThreadControl(this);
    SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
    dateUtils_ = new DateUtils();
}

JSThread::JSThread(EcmaVM *vm, ThreadType threadType) : id_(os::thread::GetCurrentThreadId()),
                                                        vm_(vm), threadType_(threadType)
{
    ASSERT(threadType == ThreadType::JIT_THREAD);
    // The JIT thread does not need GC iterating.
    readyForGCIterating_ = false;
    RegisterThread(this);
}

JSThread::JSThread(ThreadType threadType) : threadType_(threadType)
{
    ASSERT(threadType == ThreadType::DAEMON_THREAD);
    // The daemon thread does not need GC iterating.
    readyForGCIterating_ = false;
}

JSThread::~JSThread()
{
    readyForGCIterating_ = false;
    if (globalStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalStorage_);
        globalStorage_ = nullptr;
    }
    if (globalDebugStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalDebugStorage_);
        globalDebugStorage_ = nullptr;
    }

    for (auto item : contexts_) {
        GetNativeAreaAllocator()->Free(item->GetFrameBase(), sizeof(JSTaggedType) *
                                       vm_->GetEcmaParamConfiguration().GetMaxStackSize());
        item->SetFrameBase(nullptr);
        delete item;
    }
    contexts_.clear();
    GetNativeAreaAllocator()->FreeArea(regExpCache_);

    glueData_.frameBase_ = nullptr;
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;
    regExpCache_ = nullptr;
    if (vmThreadControl_ != nullptr) {
        delete vmThreadControl_;
        vmThreadControl_ = nullptr;
    }
    // The DaemonThread is unregistered when the bound std::thread is released.
    if (!IsDaemonThread()) {
        UnregisterThread(this);
    }
    if (dateUtils_ != nullptr) {
        delete dateUtils_;
        dateUtils_ = nullptr;
    }
}

void JSThread::SetException(JSTaggedValue exception)
{
    glueData_.exception_ = exception;
#if defined(ENABLE_EXCEPTION_BACKTRACE)
    if (vm_->GetJSOptions().EnableExceptionBacktrace()) {
        LOG_ECMA(INFO) << "SetException:" << exception.GetRawData();
        std::ostringstream stack;
        Backtrace(stack);
        LOG_ECMA(INFO) << stack.str();
    }
#endif
}

void JSThread::ClearException()
{
    glueData_.exception_ = JSTaggedValue::Hole();
}

JSTaggedValue JSThread::GetCurrentLexenv() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetEnv();
}

JSTaggedValue JSThread::GetCurrentFunction() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetFunction();
}

const JSTaggedType *JSThread::GetCurrentFrame() const
{
    if (IsAsmInterpreter()) {
        return GetLastLeaveFrame();
    }
    return GetCurrentSPFrame();
}

void JSThread::SetCurrentFrame(JSTaggedType *sp)
{
    if (IsAsmInterpreter()) {
        return SetLastLeaveFrame(sp);
    }
    return SetCurrentSPFrame(sp);
}

const JSTaggedType *JSThread::GetCurrentInterpretedFrame() const
{
    if (IsAsmInterpreter()) {
        auto frameHandler = FrameHandler(this);
        return frameHandler.GetSp();
    }
    return GetCurrentSPFrame();
}

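// Drains the pending free-global callbacks in LIFO order; each entry is popped
// before its callback runs, so callbacks that enqueue new entries are handled too.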
void JSThread::InvokeWeakNodeFreeGlobalCallBack()
{
    while (!weakNodeFreeGlobalCallbacks_.empty()) {
        auto callbackPair = weakNodeFreeGlobalCallbacks_.back();
        weakNodeFreeGlobalCallbacks_.pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        (*callback)(callbackPair.second);
    }
}

void JSThread::InvokeSharedNativePointerCallbacks()
{
    auto &callbacks = vm_->GetSharedNativePointerCallbacks();
    while (!callbacks.empty()) {
        auto callbackPair = callbacks.back();
        callbacks.pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second.first != nullptr &&
               callbackPair.second.second != nullptr);
        auto callback = callbackPair.first;
        (*callback)(env_, callbackPair.second.first, callbackPair.second.second);
    }
}

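// Runs the native-finalize callbacks collected for dead weak nodes. Guarded by
// runningNativeFinalizeCallbacks_ because a callback may trigger another GC and
// re-enter this function.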
void JSThread::InvokeWeakNodeNativeFinalizeCallback()
{
    // The second callback may trigger another GC; if so, return directly.
    if (runningNativeFinalizeCallbacks_) {
        return;
    }
    runningNativeFinalizeCallbacks_ = true;
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "InvokeNativeFinalizeCallbacks num:"
        + std::to_string(weakNodeNativeFinalizeCallbacks_.size()));
    while (!weakNodeNativeFinalizeCallbacks_.empty()) {
        auto callbackPair = weakNodeNativeFinalizeCallbacks_.back();
        weakNodeNativeFinalizeCallbacks_.pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        (*callback)(callbackPair.second);
    }
    if (finalizeTaskCallback_ != nullptr) {
        finalizeTaskCallback_();
    }
    runningNativeFinalizeCallbacks_ = false;
}

bool JSThread::IsStartGlobalLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().IsStartGlobalLeakCheck();
}

bool JSThread::EnableGlobalObjectLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalObjectLeakCheck();
}

bool JSThread::EnableGlobalPrimitiveLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalPrimitiveLeakCheck();
}

bool JSThread::IsInRunningStateOrProfiling() const
{
    bool result = IsInRunningState();
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    result |= vm_->GetHeapProfile() != nullptr;
#endif
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    result |= GetIsProfiling();
#endif
    return result;
}

void JSThread::WriteToStackTraceFd(std::ostringstream &buffer) const
{
    if (stackTraceFd_ < 0) {
        return;
    }
    buffer << std::endl;
    DPrintf(reinterpret_cast<fd_t>(stackTraceFd_), buffer.str());
    buffer.str("");
}

void JSThread::SetStackTraceFd(int32_t fd)
{
    stackTraceFd_ = fd;
}

void JSThread::CloseStackTraceFd()
{
    if (stackTraceFd_ != -1) {
        FSync(reinterpret_cast<fd_t>(stackTraceFd_));
        Close(reinterpret_cast<fd_t>(stackTraceFd_));
        stackTraceFd_ = -1;
    }
}

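// Accumulates (machineCode, methodName, offset) entries per exception object;
// a single exception can map to several JIT machine-code entries.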
void JSThread::SetJitCodeMap(JSTaggedType exception, MachineCode *machineCode, std::string &methodName,
                             uintptr_t offset)
{
    auto it = jitCodeMaps_.find(exception);
    if (it != jitCodeMaps_.end()) {
        it->second->push_back(std::make_tuple(machineCode, methodName, offset));
    } else {
        JitCodeVector *jitCode = new JitCodeVector {std::make_tuple(machineCode, methodName, offset)};
        jitCodeMaps_.emplace(exception, jitCode);
    }
}

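// Visits this thread's GC roots: the pending exception, the builtin entries,
// and the stack frames and handles of every context (switching to each context
// temporarily), plus the global handle storage.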
void JSThread::Iterate(const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor,
                       const RootBaseAndDerivedVisitor &derivedVisitor)
{
    if (!glueData_.exception_.IsHole()) {
        visitor(Root::ROOT_VM, ObjectSlot(ToUintPtr(&glueData_.exception_)));
    }
    rangeVisitor(
        Root::ROOT_VM, ObjectSlot(glueData_.builtinEntries_.Begin()), ObjectSlot(glueData_.builtinEntries_.End()));

    EcmaContext *tempContext = glueData_.currentContext_;
    for (EcmaContext *context : contexts_) {
        // visit stack roots
        SwitchCurrentContext(context, true);
        FrameHandler frameHandler(this);
        frameHandler.Iterate(visitor, rangeVisitor, derivedVisitor);
        context->Iterate(visitor, rangeVisitor);
    }
    SwitchCurrentContext(tempContext, true);
    // visit tagged handle storage roots
    if (vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        IterateHandleWithCheck(visitor, rangeVisitor);
    } else {
        size_t globalCount = 0;
        globalStorage_->IterateUsageGlobal([visitor, &globalCount](Node *node) {
            JSTaggedValue value(node->GetObject());
            if (value.IsHeapObject()) {
                visitor(ecmascript::Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            }
            globalCount++;
        });
        static bool hasCheckedGlobalCount = false;
        static const size_t WARN_GLOBAL_COUNT = 100000;
        if (!hasCheckedGlobalCount && globalCount >= WARN_GLOBAL_COUNT) {
            LOG_ECMA(WARN) << "Global reference count is " << globalCount << ", it exceeds the upper limit 100000!";
            hasCheckedGlobalCount = true;
        }
    }
}

void JSThread::IterateJitCodeMap(const JitCodeMapVisitor &jitCodeMapVisitor)
{
    jitCodeMapVisitor(jitCodeMaps_);
}

void JSThread::IterateHandleWithCheck(const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor)
{
    size_t handleCount = 0;
    for (EcmaContext *context : contexts_) {
        handleCount += context->IterateHandle(rangeVisitor);
    }

    size_t globalCount = 0;
    static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
    int typeCount[JS_TYPE_LAST] = { 0 };
    int primitiveCount = 0;
    bool isStopObjectLeakCheck = EnableGlobalObjectLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    bool isStopPrimitiveLeakCheck = EnableGlobalPrimitiveLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    std::ostringstream buffer;
    globalDebugStorage_->IterateUsageGlobal([this, visitor, &globalCount, &typeCount, &primitiveCount,
        isStopObjectLeakCheck, isStopPrimitiveLeakCheck, &buffer](DebugNode *node) {
        node->MarkCount();
        JSTaggedValue value(node->GetObject());
        if (value.IsHeapObject()) {
            visitor(ecmascript::Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            TaggedObject *object = value.GetTaggedObject();
            MarkWord word(value.GetTaggedObject());
            if (word.IsForwardingAddress()) {
                object = word.ToForwardingAddress();
            }
            typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;

            // Print global information about possible memory leaks.
            // You can print the global new stack within the range of the leaked global number.
            if (isStopObjectLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak object address:" << std::hex << object <<
                    ", type:" << JSHClass::DumpJSType(JSType(object->GetClass()->GetObjectType())) <<
                    ", node address:" << node << ", number:" << std::dec << node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        } else {
            primitiveCount++;
            if (isStopPrimitiveLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak primitive:" << std::hex << value.GetRawData() <<
                    ", node address:" << node << ", number:" << std::dec << node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        }
        globalCount++;
    });

    if (isStopObjectLeakCheck || isStopPrimitiveLeakCheck) {
        buffer << "Global leak check success!";
        WriteToStackTraceFd(buffer);
        CloseStackTraceFd();
    }
    // Determine whether memory leaks by checking the handle and global counts.
    LOG_ECMA(INFO) << "Iterate root handle count:" << handleCount << ", global handle count:" << globalCount;
    OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type Primitive count:" << primitiveCount;
    // Print global object type statistics.
    static const int MIN_COUNT_THRESHOLD = 50;
    for (int i = 0; i < JS_TYPE_LAST; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type " << JSHClass::DumpJSType(JSType(i))
                                            << " count:" << typeCount[i];
        }
    }
}

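// Visits weak global nodes. The visitor returns nullptr when the referent is
// dead (clear the slot to Undefined and run or schedule the callbacks), a new
// address when it moved (update the slot), or the same address (leave it alone).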
void JSThread::IterateWeakEcmaGlobalStorage(const WeakRootVisitor &visitor, GCKind gcKind)
{
    auto callBack = [this, visitor, gcKind](WeakNode *node) {
        JSTaggedValue value(node->GetObject());
        if (!value.IsHeapObject()) {
            return;
        }
        auto object = value.GetTaggedObject();
        auto fwd = visitor(object);
        if (fwd == nullptr) {
            // undefined
            node->SetObject(JSTaggedValue::Undefined().GetRawData());
            auto nativeFinalizeCallback = node->GetNativeFinalizeCallback();
            if (nativeFinalizeCallback) {
                weakNodeNativeFinalizeCallbacks_.push_back(std::make_pair(nativeFinalizeCallback,
                                                                          node->GetReference()));
            }
            auto freeGlobalCallBack = node->GetFreeGlobalCallback();
            if (!freeGlobalCallBack) {
                // If there is no callback, dispose the global handle immediately.
                DisposeGlobalHandle(ToUintPtr(node));
            } else if (gcKind == GCKind::SHARED_GC) {
                // For shared GC, the free-global callback is deferred and executed on the node's own thread.
                weakNodeFreeGlobalCallbacks_.push_back(std::make_pair(freeGlobalCallBack, node->GetReference()));
            } else {
                node->CallFreeGlobalCallback();
            }
        } else if (fwd != object) {
            // update
            node->SetObject(JSTaggedValue(fwd).GetRawData());
        }
    };
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_->IterateWeakUsageGlobal(callBack);
    } else {
        globalDebugStorage_->IterateWeakUsageGlobal(callBack);
    }
}

void JSThread::UpdateJitCodeMapReference(const WeakRootVisitor &visitor)
{
    auto it = jitCodeMaps_.begin();
    while (it != jitCodeMaps_.end()) {
        auto obj = reinterpret_cast<TaggedObject *>(it->first);
        auto fwd = visitor(obj);
        if (fwd == nullptr) {
            delete it->second;
            it = jitCodeMaps_.erase(it);
        } else if (fwd != obj) {
            jitCodeMaps_.emplace(JSTaggedValue(fwd).GetRawData(), it->second);
            it = jitCodeMaps_.erase(it);
        } else {
            ++it;
        }
    }
}

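// Returns true (and throws a RangeError) when sp falls into the reserved region
// at the bottom of the interpreter frame area; skipped during cross-thread execution.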
bool JSThread::DoStackOverflowCheck(const JSTaggedType *sp)
{
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && sp <= glueData_.frameBase_ + RESERVE_STACK_SIZE)) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! Remaining stack size is: " << (sp - glueData_.frameBase_);
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}

bool JSThread::DoStackLimitCheck()
{
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && GetCurrentStackPosition() < GetStackLimit())) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! current:" << GetCurrentStackPosition() << " limit:" << GetStackLimit();
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}

uintptr_t *JSThread::ExpandHandleStorage()
{
    return GetCurrentEcmaContext()->ExpandHandleStorage();
}

void JSThread::ShrinkHandleStorage(int prevIndex)
{
    GetCurrentEcmaContext()->ShrinkHandleStorage(prevIndex);
}

void JSThread::NotifyStableArrayElementsGuardians(JSHandle<JSObject> receiver, StableArrayChangeKind changeKind)
{
    if (!glueData_.stableArrayElementsGuardians_) {
        return;
    }
    if (!receiver->GetJSHClass()->IsPrototype() && !receiver->IsJSArray()) {
        return;
    }
    auto env = GetEcmaVM()->GetGlobalEnv();
    if (receiver.GetTaggedValue() == env->GetObjectFunctionPrototype().GetTaggedValue() ||
        receiver.GetTaggedValue() == env->GetArrayPrototype().GetTaggedValue()) {
        glueData_.stableArrayElementsGuardians_ = false;
        return;
    }
    if (changeKind == StableArrayChangeKind::PROTO && receiver->IsJSArray()) {
        glueData_.stableArrayElementsGuardians_ = false;
    }
}

void JSThread::ResetGuardians()
{
    glueData_.stableArrayElementsGuardians_ = true;
}

void JSThread::SetInitialBuiltinHClass(
    BuiltinTypeId type, JSHClass *builtinHClass, JSHClass *instanceHClass,
    JSHClass *prototypeHClass, JSHClass *prototypeOfPrototypeHClass, JSHClass *extraHClass)
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    auto &entry = glueData_.builtinHClassEntries_.entries[index];
    LOG_ECMA(DEBUG) << "JSThread::SetInitialBuiltinHClass: "
                    << "Builtin = " << ToString(type)
                    << ", builtinHClass = " << builtinHClass
                    << ", instanceHClass = " << instanceHClass
                    << ", prototypeHClass = " << prototypeHClass
                    << ", prototypeOfPrototypeHClass = " << prototypeOfPrototypeHClass
                    << ", extraHClass = " << extraHClass;
    entry.builtinHClass = builtinHClass;
    entry.instanceHClass = instanceHClass;
    entry.prototypeHClass = prototypeHClass;
    entry.prototypeOfPrototypeHClass = prototypeOfPrototypeHClass;
    entry.extraHClass = extraHClass;
}

void JSThread::SetInitialBuiltinGlobalHClass(
    JSHClass *builtinHClass, GlobalIndex globalIndex)
{
    auto &map = ctorHclassEntries_;
    map[builtinHClass] = globalIndex;
}

JSHClass *JSThread::GetBuiltinHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].builtinHClass;
}

JSHClass *JSThread::GetBuiltinInstanceHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].instanceHClass;
}

JSHClass *JSThread::GetBuiltinExtraHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].extraHClass;
}

JSHClass *JSThread::GetArrayInstanceHClass(ElementsKind kind, bool isPrototype) const
{
    auto iter = GetArrayHClassIndexMap().find(kind);
    ASSERT(iter != GetArrayHClassIndexMap().end());
    auto index = isPrototype ? static_cast<size_t>(iter->second.second) : static_cast<size_t>(iter->second.first);
    auto exceptArrayHClass = GlobalConstants()->GetGlobalConstantObject(index);
    auto exceptRecvHClass = JSHClass::Cast(exceptArrayHClass.GetTaggedObject());
    ASSERT(exceptRecvHClass->IsJSArray());
    return exceptRecvHClass;
}

JSHClass *JSThread::GetBuiltinPrototypeHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].prototypeHClass;
}

JSHClass *JSThread::GetBuiltinPrototypeOfPrototypeHClass(BuiltinTypeId type) const
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    return glueData_.builtinHClassEntries_.entries[index].prototypeOfPrototypeHClass;
}

size_t JSThread::GetBuiltinHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinHClassOffset(type, isArch32);
}

size_t JSThread::GetBuiltinPrototypeHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinPrototypeHClassOffset(type, isArch32);
}

void JSThread::CheckSwitchDebuggerBCStub()
{
    auto isDebug = GetEcmaVM()->GetJsDebuggerManager()->IsDebugMode();
    if (LIKELY(!isDebug)) {
        if (glueData_.bcStubEntries_.Get(0) == glueData_.bcStubEntries_.Get(1)) {
            for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
                auto stubEntry = glueData_.bcDebuggerStubEntries_.Get(i);
                auto debuggerStubEntry = glueData_.bcStubEntries_.Get(i);
                glueData_.bcStubEntries_.Set(i, stubEntry);
                glueData_.bcDebuggerStubEntries_.Set(i, debuggerStubEntry);
            }
        }
    } else {
        if (glueData_.bcDebuggerStubEntries_.Get(0) == glueData_.bcDebuggerStubEntries_.Get(1)) {
            for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
                auto stubEntry = glueData_.bcStubEntries_.Get(i);
                auto debuggerStubEntry = glueData_.bcDebuggerStubEntries_.Get(i);
                glueData_.bcDebuggerStubEntries_.Set(i, stubEntry);
                glueData_.bcStubEntries_.Set(i, debuggerStubEntry);
            }
        }
    }
}

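// Swaps the bytecode handler entries between the normal and PGO-profiling stub
// sets whenever the profiler is toggled, so dispatch needs no per-bytecode check.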
void JSThread::CheckOrSwitchPGOStubs()
{
    bool isSwitch = false;
    if (IsPGOProfilerEnable()) {
        if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
            SetBCStubStatus(BCStubStatus::PROFILE_BC_STUB);
            isSwitch = true;
        }
    } else {
        if (GetBCStubStatus() == BCStubStatus::PROFILE_BC_STUB) {
            SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
            isSwitch = true;
        }
    }
    if (isSwitch) {
        Address curAddress;
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                        \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                                     \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}

void JSThread::SwitchJitProfileStubs(bool isEnablePgo)
{
    if (isEnablePgo) {
        SetPGOProfilerEnable(true);
        CheckOrSwitchPGOStubs();
        return;
    }
    bool isSwitch = false;
    if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
        SetBCStubStatus(BCStubStatus::JIT_PROFILE_BC_STUB);
        isSwitch = true;
    }
    if (isSwitch) {
        Address curAddress;
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                        \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                                     \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_JIT_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}

void JSThread::TerminateExecution()
{
    // Set TERMINATION_ERROR as the pending exception.
    ObjectFactory *factory = GetEcmaVM()->GetFactory();
    JSHandle<JSObject> error = factory->GetJSError(ErrorType::TERMINATION_ERROR,
                                                   "Terminate execution!", StackCheck::NO);
    SetException(error.GetTaggedValue());
}

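// If the ACTIVE_BARRIER flag is set, report through the suspend barrier so the
// thread that requested suspend-all can make progress.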
void JSThread::CheckAndPassActiveBarrier()
{
    ThreadStateAndFlags oldStateAndFlags;
    oldStateAndFlags.asInt = glueData_.stateAndFlags_.asInt;
    if ((oldStateAndFlags.asStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
        PassSuspendBarrier();
    }
}

bool JSThread::PassSuspendBarrier()
{
    // Use suspendLock_ to avoid a data race between the suspend-all thread and the suspended threads.
    LockHolder lock(suspendLock_);
    if (suspendBarrier_ != nullptr) {
        suspendBarrier_->PassStrongly();
        suspendBarrier_ = nullptr;
        ClearFlag(ThreadFlag::ACTIVE_BARRIER);
        return true;
    }
    return false;
}

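// Polled from managed code at safepoints: handles termination and suspend
// requests, VM suspension, JIT installation, profiling start, and may trigger
// GC. Returns true when a GC was triggered.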
bool JSThread::CheckSafepoint()
{
    ResetCheckSafePointStatus();

    if (HasTerminationRequest()) {
        TerminateExecution();
        SetVMTerminated(true);
        SetTerminationRequest(false);
    }

    if (HasSuspendRequest()) {
        WaitSuspension();
    }

    // vmThreadControl_'s thread_ is this JSThread.
    if (VMNeedSuspension()) {
        vmThreadControl_->SuspendVM();
    }
    if (HasInstallMachineCode()) {
        vm_->GetJit()->InstallTasks(this);
        SetInstallMachineCode(false);
    }

#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    if (needProfiling_.load() && !isProfiling_) {
        DFXJSNApi::StartCpuProfilerForFile(vm_, profileName_, CpuProfiler::INTERVAL_OF_INNER_START);
        SetNeedProfiling(false);
    }
#endif // ECMASCRIPT_SUPPORT_CPUPROFILER
    bool gcTriggered = false;
#ifndef NDEBUG
    if (vm_->GetJSOptions().EnableForceGC()) {
        GetEcmaVM()->CollectGarbage(TriggerGCType::FULL_GC);
        gcTriggered = true;
    }
#endif
    auto heap = const_cast<Heap *>(GetEcmaVM()->GetHeap());
    // Handle the app-exit sensitive scene.
    heap->HandleExitHighSensitiveEvent();

    // Do not trigger local GC while the shared GC is processing the local-to-shared RSet.
    if (IsProcessingLocalToSharedRset()) {
        return false;
    }
    // After concurrent marking finishes, trigger GC here to avoid creating too much floating garbage,
    // except during serialization or a high-sensitivity event.
    if (IsMarkFinished() && heap->GetConcurrentMarker()->IsTriggeredConcurrentMark()
        && !heap->GetOnSerializeEvent() && !heap->InSensitiveStatus()) {
        heap->SetCanThrowOOMError(false);
        heap->GetConcurrentMarker()->HandleMarkingFinished();
        heap->SetCanThrowOOMError(true);
        gcTriggered = true;
    }
    return gcTriggered;
}

void JSThread::CheckJSTaggedType(JSTaggedType value) const
{
    if (JSTaggedValue(value).IsHeapObject() &&
        !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
        LOG_FULL(FATAL) << "value:" << value << " is invalid!";
    }
}

bool JSThread::CpuProfilerCheckJSTaggedType(JSTaggedType value) const
{
    if (JSTaggedValue(value).IsHeapObject() &&
        !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
        return false;
    }
    return true;
}

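// Computes the ASM interpreter's native stack limit from the OS-reported thread
// (or FFRT coroutine) stack bounds, falling back to
// "current position - default stack size" when the bounds cannot be queried.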
// static
size_t JSThread::GetAsmStackLimit()
{
#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
    // js stack limit
    size_t result = GetCurrentStackPosition() - EcmaParamConfiguration::GetDefalutStackSize();
    int ret = -1;
    void *stackAddr = nullptr;
    size_t size = 0;
#if defined(ENABLE_FFRT_INTERFACES)
    if (!ffrt_get_current_coroutine_stack(&stackAddr, &size)) {
        pthread_attr_t attr;
        ret = pthread_getattr_np(pthread_self(), &attr);
        if (ret != 0) {
            LOG_ECMA(ERROR) << "Get current thread attr failed";
            return result;
        }
        ret = pthread_attr_getstack(&attr, &stackAddr, &size);
        if (pthread_attr_destroy(&attr) != 0) {
            LOG_ECMA(ERROR) << "Destroy current thread attr failed";
        }
        if (ret != 0) {
            LOG_ECMA(ERROR) << "Get current thread stack size failed";
            return result;
        }
    }
#else
    pthread_attr_t attr;
    ret = pthread_getattr_np(pthread_self(), &attr);
    if (ret != 0) {
        LOG_ECMA(ERROR) << "Get current thread attr failed";
        return result;
    }
    ret = pthread_attr_getstack(&attr, &stackAddr, &size);
    if (pthread_attr_destroy(&attr) != 0) {
        LOG_ECMA(ERROR) << "Destroy current thread attr failed";
    }
    if (ret != 0) {
        LOG_ECMA(ERROR) << "Get current thread stack size failed";
        return result;
    }
#endif

    bool isMainThread = IsMainThread();
    uintptr_t threadStackLimit = reinterpret_cast<uintptr_t>(stackAddr);
    uintptr_t threadStackStart = threadStackLimit + size;
    if (isMainThread) {
        struct rlimit rl;
        ret = getrlimit(RLIMIT_STACK, &rl);
        if (ret != 0) {
            LOG_ECMA(ERROR) << "Get current thread stack size failed";
            return result;
        }
        if (rl.rlim_cur > DEFAULT_MAX_SYSTEM_STACK_SIZE) {
            LOG_ECMA(ERROR) << "Current thread stack size exceeds " << DEFAULT_MAX_SYSTEM_STACK_SIZE
                            << " : " << rl.rlim_cur;
            return result;
        }
        threadStackLimit = threadStackStart - rl.rlim_cur;
    }

    if (result < threadStackLimit) {
        result = threadStackLimit;
    }

    LOG_INTERPRETER(DEBUG) << "Current thread stack start: " << reinterpret_cast<void *>(threadStackStart);
    LOG_INTERPRETER(DEBUG) << "Used stack before js stack start: "
                           << reinterpret_cast<void *>(threadStackStart - GetCurrentStackPosition());
    LOG_INTERPRETER(DEBUG) << "Current thread asm stack limit: " << reinterpret_cast<void *>(result);

    // To avoid checking for stack overflow too often, we only check it before pushing vregs or
    // parameters of variable length. So we need a reserved region of stack to make sure the stack
    // won't overflow when pushing other data.
    result += EcmaParamConfiguration::GetDefaultReservedStackSize();
    if (threadStackStart <= result) {
        LOG_FULL(FATAL) << "Too small stackSize to run jsvm";
    }
    return result;
#else
    return 0;
#endif
}

bool JSThread::IsLegalAsmSp(uintptr_t sp) const
{
    uint64_t bottom = GetStackLimit() - EcmaParamConfiguration::GetDefaultReservedStackSize();
    uint64_t top = GetStackStart() + EcmaParamConfiguration::GetAllowedUpperStackDiff();
    return (bottom <= sp && sp <= top);
}

bool JSThread::IsLegalThreadSp(uintptr_t sp) const
{
    uintptr_t bottom = reinterpret_cast<uintptr_t>(glueData_.frameBase_);
    size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
    uintptr_t top = bottom + maxStackSize;
    return (bottom <= sp && sp <= top);
}

bool JSThread::IsLegalSp(uintptr_t sp) const
{
    return IsLegalAsmSp(sp) || IsLegalThreadSp(sp);
}

bool JSThread::IsMainThread()
{
#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
    return getpid() == syscall(SYS_gettid);
#else
    return true;
#endif
}

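// Adds a context to this thread. The first context shares the thread's own
// interpreter frame area and stack bounds; each later context gets a freshly
// allocated frame area with its own stack limit and initial frame.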
void JSThread::PushContext(EcmaContext *context)
{
    const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
    contexts_.emplace_back(context);

    if (!glueData_.currentContext_) {
        // The first context in the ecma vm.
        glueData_.currentContext_ = context;
        context->SetFramePointers(const_cast<JSTaggedType *>(GetCurrentSPFrame()),
                                  const_cast<JSTaggedType *>(GetLastLeaveFrame()),
                                  const_cast<JSTaggedType *>(GetLastFp()));
        context->SetFrameBase(glueData_.frameBase_);
        context->SetStackLimit(glueData_.stackLimit_);
        context->SetStackStart(glueData_.stackStart_);
    } else {
        // align with 16
        size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
        context->SetFrameBase(static_cast<JSTaggedType *>(
            vm_->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize)));
        context->SetFramePointers(context->GetFrameBase() + maxStackSize, nullptr, nullptr);
        context->SetStackLimit(GetAsmStackLimit());
        context->SetStackStart(GetCurrentStackPosition());
        EcmaInterpreter::InitStackFrame(context);
    }
}

void JSThread::PopContext()
{
    contexts_.pop_back();
    glueData_.currentContext_ = contexts_.back();
}

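// Saves the frame pointers, stack bounds, and global env of the outgoing
// context into that context, then installs the incoming context's values into
// the glue data. See the comment below on the two copies of GlobalObject.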
void JSThread::SwitchCurrentContext(EcmaContext *currentContext, bool isInIterate)
{
    ASSERT(std::count(contexts_.begin(), contexts_.end(), currentContext));

    glueData_.currentContext_->SetFramePointers(const_cast<JSTaggedType *>(GetCurrentSPFrame()),
                                                const_cast<JSTaggedType *>(GetLastLeaveFrame()),
                                                const_cast<JSTaggedType *>(GetLastFp()));
    glueData_.currentContext_->SetFrameBase(glueData_.frameBase_);
    glueData_.currentContext_->SetStackLimit(GetStackLimit());
    glueData_.currentContext_->SetStackStart(GetStackStart());
    glueData_.currentContext_->SetGlobalEnv(GetGlueGlobalEnv());
    // When glueData_.currentContext_ is not fully initialized, glueData_.globalObject_ will be Hole.
    // Assigning Hole to JSGlobalObject could cause a mistake during builtins initialization.
    if (!glueData_.globalObject_.IsHole()) {
        glueData_.currentContext_->GetGlobalEnv()->SetJSGlobalObject(this, glueData_.globalObject_);
    }

    SetCurrentSPFrame(currentContext->GetCurrentFrame());
    SetLastLeaveFrame(currentContext->GetLeaveFrame());
    SetLastFp(currentContext->GetLastFp());
    glueData_.frameBase_ = currentContext->GetFrameBase();
    glueData_.stackLimit_ = currentContext->GetStackLimit();
    glueData_.stackStart_ = currentContext->GetStackStart();
    if (!currentContext->GlobalEnvIsHole()) {
        SetGlueGlobalEnv(*(currentContext->GetGlobalEnv()));
        /**
         * GlobalObject has two copies, one in GlueData and one in Context.GlobalEnv. When switching contexts,
         * we save the GlobalObject in GlueData to CurrentContext.GlobalEnv (is this necessary?), then switch
         * to the new context and save the GlobalObject in NewContext.GlobalEnv to GlueData.
         * The initial value of GlobalObject in Context.GlobalEnv is Undefined, but in GlueData it is Hole,
         * so if two SharedGCs happen during builtins initialization like this, an incorrect scene may result:
         *
         * Default:
         *     Slot for GlobalObject:     Context.GlobalEnv      GlueData
         *     value:                     Undefined              Hole
         *
         * First SharedGC (JSThread::SwitchCurrentContext), set GlobalObject from Context.GlobalEnv to GlueData:
         *     Slot for GlobalObject:     Context.GlobalEnv      GlueData
         *     value:                     Undefined              Undefined
         *
         * Builtins initialization creates the GlobalObject and sets it in Context.GlobalEnv:
         *     Slot for GlobalObject:     Context.GlobalEnv      GlueData
         *     value:                     Obj                    Undefined
         *
         * Second SharedGC (JSThread::SwitchCurrentContext), set GlobalObject from GlueData to Context.GlobalEnv:
         *     Slot for GlobalObject:     Context.GlobalEnv      GlueData
         *     value:                     Undefined              Undefined
         *
         * So when copying values between Context.GlobalEnv and GlueData, we need to check whether the value is
         * Hole in GlueData and whether it is Undefined in Context.GlobalEnv, because the initial values differ.
         */
        if (!currentContext->GetGlobalEnv()->GetGlobalObject().IsUndefined()) {
            SetGlobalObject(currentContext->GetGlobalEnv()->GetGlobalObject());
        }
    }
    if (!isInIterate) {
        // If isInIterate is true, we are in a GC iteration and the global variables need not change.
        glueData_.globalConst_ = const_cast<GlobalEnvConstants *>(currentContext->GlobalConstants());
    }

    glueData_.currentContext_ = currentContext;
}

bool JSThread::EraseContext(EcmaContext *context)
{
    const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
    bool isCurrentContext = false;
    auto iter = std::find(contexts_.begin(), contexts_.end(), context);
    if (iter != contexts_.end()) {
        if (glueData_.currentContext_ == context) {
            isCurrentContext = true;
        }
        contexts_.erase(iter);
        if (isCurrentContext) {
            SwitchCurrentContext(contexts_.back());
        }
        return true;
    }
    return false;
}

void JSThread::ClearContextCachedConstantPool()
{
    for (EcmaContext *context : contexts_) {
        context->ClearCachedConstantPool();
    }
}

PropertiesCache *JSThread::GetPropertiesCache() const
{
    return glueData_.currentContext_->GetPropertiesCache();
}

const GlobalEnvConstants *JSThread::GetFirstGlobalConst() const
{
    return contexts_[0]->GlobalConstants();
}

bool JSThread::IsAllContextsInitialized() const
{
    return contexts_.back()->IsInitialized();
}

bool JSThread::IsReadyToUpdateDetector() const
{
    return !GetEnableLazyBuiltins() && IsAllContextsInitialized();
}

Area *JSThread::GetOrCreateRegExpCache()
{
    if (regExpCache_ == nullptr) {
        regExpCache_ = nativeAreaAllocator_->AllocateArea(MAX_REGEXP_CACHE_SIZE);
    }
    return regExpCache_;
}

void JSThread::InitializeBuiltinObject(const std::string& key)
{
    BuiltinIndex& builtins = BuiltinIndex::GetInstance();
    auto index = builtins.GetBuiltinIndex(key);
    ASSERT(index != BuiltinIndex::NOT_FOUND);
    /*
        Using `auto globalObject = GetEcmaVM()->GetGlobalEnv()->GetGlobalObject()` here would produce
        incorrect results in a multi-context environment. For example:

        ```ts
        let obj = {};
        print(obj instanceof Object); // instead of true, will print false
        ```
    */
    auto globalObject = contexts_.back()->GetGlobalEnv()->GetGlobalObject();
    auto jsObject = JSHandle<JSObject>(this, globalObject);
    auto box = jsObject->GetGlobalPropertyBox(this, key);
    if (box == nullptr) {
        return;
    }
    auto& entry = glueData_.builtinEntries_.builtin_[index];
    entry.box_ = JSTaggedValue::Cast(box);
    auto builtin = JSHandle<JSObject>(this, box->GetValue());
    auto hclass = builtin->GetJSHClass();
    entry.hClass_ = JSTaggedValue::Cast(hclass);
}

void JSThread::InitializeBuiltinObject()
{
    BuiltinIndex& builtins = BuiltinIndex::GetInstance();
    for (auto key : builtins.GetBuiltinKeys()) {
        InitializeBuiltinObject(key);
    }
}

bool JSThread::IsPropertyCacheCleared() const
{
    for (EcmaContext *context : contexts_) {
        if (!context->GetPropertiesCache()->IsCleared()) {
            return false;
        }
    }
    return true;
}

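// Central state transition: entering or leaving RUNNING goes through the
// CAS-based transfer paths; all other transitions store the new state directly.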
void JSThread::UpdateState(ThreadState newState)
{
    ThreadState oldState = GetState();
    if (oldState == ThreadState::RUNNING && newState != ThreadState::RUNNING) {
        TransferFromRunningToSuspended(newState);
    } else if (oldState != ThreadState::RUNNING && newState == ThreadState::RUNNING) {
        TransferToRunning();
    } else {
        // Here can be some extra checks...
        StoreState(newState);
    }
}

void JSThread::SuspendThread(bool internalSuspend, SuspendBarrier *barrier)
{
    LockHolder lock(suspendLock_);
    if (!internalSuspend) {
        // do something here if we want to combine internal and external suspension
    }

    uint32_t old_count = suspendCount_++;
    if (old_count == 0) {
        SetFlag(ThreadFlag::SUSPEND_REQUEST);
        SetCheckSafePointStatus();
    }

    if (barrier != nullptr) {
        ASSERT(suspendBarrier_ == nullptr);
        suspendBarrier_ = barrier;
        SetFlag(ThreadFlag::ACTIVE_BARRIER);
        SetCheckSafePointStatus();
    }
}

void JSThread::ResumeThread(bool internalSuspend)
{
    LockHolder lock(suspendLock_);
    if (!internalSuspend) {
        // do something here if we want to combine internal and external suspension
    }
    if (suspendCount_ > 0) {
        suspendCount_--;
        if (suspendCount_ == 0) {
            ClearFlag(ThreadFlag::SUSPEND_REQUEST);
            ResetCheckSafePointStatus();
        }
    }
    suspendCondVar_.Signal();
}

void JSThread::WaitSuspension()
{
    constexpr int TIMEOUT = 100;
    ThreadState oldState = GetState();
    UpdateState(ThreadState::IS_SUSPENDED);
    {
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitSuspension");
        LockHolder lock(suspendLock_);
        while (suspendCount_ > 0) {
            suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            // we need to do something if the Runtime is terminating at this point
        }
        ASSERT(!HasSuspendRequest());
    }
    UpdateState(oldState);
}

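// Usage sketch: bracket native sections that touch JS heap objects so GC sees
// the thread as RUNNING, e.g.
//     thread->ManagedCodeBegin();
//     ... allocate or read JS objects ...
//     thread->ManagedCodeEnd();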
void JSThread::ManagedCodeBegin()
{
    ASSERT(!IsInManagedState());
    UpdateState(ThreadState::RUNNING);
}

void JSThread::ManagedCodeEnd()
{
    ASSERT(IsInManagedState());
    UpdateState(ThreadState::NATIVE);
}

void JSThread::TransferFromRunningToSuspended(ThreadState newState)
{
    ASSERT(currentThread == this);
    StoreSuspendedState(newState);
    CheckAndPassActiveBarrier();
}

void JSThread::TransferToRunning()
{
    ASSERT(!IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
    // Invoke the free weak global callbacks when the thread switches to running.
    if (!weakNodeFreeGlobalCallbacks_.empty()) {
        InvokeWeakNodeFreeGlobalCallBack();
    }
    if (!vm_->GetSharedNativePointerCallbacks().empty()) {
        InvokeSharedNativePointerCallbacks();
    }
    if (fullMarkRequest_) {
        fullMarkRequest_ = const_cast<Heap *>(vm_->GetHeap())->TryTriggerFullMarkBySharedLimit();
    }
}

void JSThread::TransferDaemonThreadToRunning()
{
    ASSERT(IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
}

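// Publishes newState while preserving the current flag bits via a CAS loop;
// compare_exchange_weak may fail spuriously, hence the retry.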
inline void JSThread::StoreState(ThreadState newState)
{
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asInt = glueData_.stateAndFlags_.asInt;

        ThreadStateAndFlags newStateAndFlags;
        newStateAndFlags.asStruct.flags = oldStateAndFlags.asStruct.flags;
        newStateAndFlags.asStruct.state = newState;

        bool done = glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                               newStateAndFlags.asNonvolatileInt,
                                                                               std::memory_order_release);
        if (LIKELY(done)) {
            break;
        }
    }
}

void JSThread::StoreRunningState(ThreadState newState)
{
    ASSERT(newState == ThreadState::RUNNING);
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asInt = glueData_.stateAndFlags_.asInt;
        ASSERT(oldStateAndFlags.asStruct.state != ThreadState::RUNNING);

        if (LIKELY(oldStateAndFlags.asStruct.flags == ThreadFlag::NO_FLAGS)) {
            ThreadStateAndFlags newStateAndFlags;
            newStateAndFlags.asStruct.flags = oldStateAndFlags.asStruct.flags;
            newStateAndFlags.asStruct.state = newState;

            if (glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                           newStateAndFlags.asNonvolatileInt,
                                                                           std::memory_order_release)) {
                break;
            }
        } else if ((oldStateAndFlags.asStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
            PassSuspendBarrier();
        } else if ((oldStateAndFlags.asStruct.flags & ThreadFlag::SUSPEND_REQUEST) != 0) {
            constexpr int TIMEOUT = 100;
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::StoreRunningState");
            LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            }
            ASSERT(!HasSuspendRequest());
        }
    }
}

inline void JSThread::StoreSuspendedState(ThreadState newState)
{
    ASSERT(newState != ThreadState::RUNNING);
    StoreState(newState);
}

void JSThread::PostFork()
{
    SetThreadId();
    if (currentThread == nullptr) {
        currentThread = this;
        ASSERT(GetState() == ThreadState::CREATED);
        UpdateState(ThreadState::NATIVE);
    } else {
        // fork was called on the same thread
        ASSERT(currentThread == this);
        ASSERT(GetState() == ThreadState::NATIVE);
    }
}

#ifndef NDEBUG
bool JSThread::IsInManagedState() const
{
    ASSERT(this == JSThread::GetCurrent());
    return GetState() == ThreadState::RUNNING;
}

MutatorLock::MutatorLockState JSThread::GetMutatorLockState() const
{
    return mutatorLockState_;
}

void JSThread::SetMutatorLockState(MutatorLock::MutatorLockState newState)
{
    mutatorLockState_ = newState;
}
#endif
}  // namespace panda::ecmascript