/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common_interfaces/base_runtime.h"

#include "common_components/common_runtime/base_runtime_param.h"
#include "common_components/common_runtime/hooks.h"
#include "common_components/common/page_pool.h"
#include "common_components/heap/allocator/region_desc.h"
#include "common_components/heap/collector/heuristic_gc_policy.h"
#include "common_components/heap/heap.h"
#include "common_components/heap/heap_manager.h"
#include "common_components/mutator/mutator_manager.h"
#include "common_interfaces/objects/composite_base_class.h"
#include "common_components/objects/string_table_internal.h"
#include "common_interfaces/objects/base_string_table.h"
#include "common_interfaces/thread/thread_state_transition.h"

namespace panda::ecmascript {
class TaggedObject;
}

namespace common {
using panda::ecmascript::TaggedObject;

std::mutex BaseRuntime::vmCreationLock_;
BaseRuntime *BaseRuntime::baseRuntimeInstance_ = nullptr;
bool BaseRuntime::initialized_ = false;

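// Lazily creates the process-wide singleton using double-checked locking:
// the fast path avoids taking vmCreationLock_ once the instance exists.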
BaseRuntime *BaseRuntime::GetInstance()
{
    if (UNLIKELY_CC(baseRuntimeInstance_ == nullptr)) {
        std::unique_lock<std::mutex> lock(vmCreationLock_);
        if (baseRuntimeInstance_ == nullptr) {
            baseRuntimeInstance_ = new BaseRuntime();
        }
    }
    return baseRuntimeInstance_;
}

void BaseRuntime::DestroyInstance()
{
    std::unique_lock<std::mutex> lock(vmCreationLock_);
    delete baseRuntimeInstance_;
    baseRuntimeInstance_ = nullptr;
}

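// Helpers that allocate a runtime module with std::nothrow, abort via
// LOGF_CHECK if the allocation fails, and then call the module's Init().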
template<typename T>
inline T* NewAndInit()
{
    T* temp = new (std::nothrow) T();
    LOGF_CHECK(temp != nullptr) << "NewAndInit failed";
    temp->Init();
    return temp;
}

template<typename T, typename A>
inline T* NewAndInit(A arg)
{
    T* temp = new (std::nothrow) T();
    LOGF_CHECK(temp != nullptr) << "NewAndInit failed";
    temp->Init(arg);
    return temp;
}

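// Finalizes and deletes a module, tolerating nullptr since initialization
// may have failed part-way (see the note in Fini() below).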
template<typename T>
inline void CheckAndFini(T*& module)
{
    if (module != nullptr) {
        module->Fini();
    }

    delete module;
    module = nullptr;
}

void BaseRuntime::Init()
{
    Init(BaseRuntimeParam::DefaultRuntimeParam());
}

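// Brings the runtime up in dependency order: the page pool first, then the
// mutator manager, the heap manager, class roots, and the string table.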
void BaseRuntime::Init(const RuntimeParam &param)
{
    std::unique_lock<std::mutex> lock(vmCreationLock_);
    if (initialized_) {
        LOG_COMMON(FATAL) << "BaseRuntime has already been initialized; it must not be initialized again.";
        return;
    }

    param_ = param;
    size_t pagePoolSize = param_.heapParam.heapSize;
#if defined(PANDA_TARGET_32)
    pagePoolSize = pagePoolSize / 128; // Use 1/128 of the heap size on 32-bit targets.
#endif
    PagePool::Instance().Init(pagePoolSize * KB / COMMON_PAGE_SIZE);
    mutatorManager_ = NewAndInit<MutatorManager>();
    heapManager_ = NewAndInit<HeapManager>(param_);
    baseClassRoots_ = NewAndInit<BaseClassRoots>();
    stringTable_ = NewAndInit<BaseStringTableImpl>();
    VLOG(INFO, "Arkcommon runtime started.");
    // Record the runtime parameters for reporting; the heap growth value needs to be increased by 1.
    VLOG(DEBUG, "Runtime parameter:\n\tHeap size: %zu(KB)\n\tRegion size: %zu(KB)\n\tExemption threshold: %.2f\n\t"
        "Heap utilization: %.2f\n\tHeap growth: %.2f\n\tAllocation rate: %.2f(MB/s)\n\tAllocation wait time: %zuns\n\t"
        "GC threshold: %zu(KB)\n\tGarbage threshold: %.2f\n\tGC interval: %zums\n\tBackup GC interval: %zus",
        pagePoolSize, param_.heapParam.regionSize,
        param_.heapParam.exemptionThreshold, param_.heapParam.heapUtilization, 1 + param_.heapParam.heapGrowth,
        param_.heapParam.allocationRate, param_.heapParam.allocationWaitTime,
        param_.gcParam.gcThreshold / KB, param_.gcParam.garbageThreshold,
        param_.gcParam.gcInterval / MILLI_SECOND_TO_NANO_SECOND,
        param_.gcParam.backupGCInterval / SECOND_TO_NANO_SECOND);

    initialized_ = true;
}

void BaseRuntime::Fini()
{
    std::unique_lock<std::mutex> lock(vmCreationLock_);
    if (!initialized_) {
        LOG_COMMON(FATAL) << "BaseRuntime has not been initialized; there is nothing to finalize.";
        return;
    }

    {
        // Since initialization might have failed part-way, each module is
        // checked before its Fini() is called.
        CheckAndFini<HeapManager>(heapManager_);
        CheckAndFini<MutatorManager>(mutatorManager_);
        CheckAndFini<BaseClassRoots>(baseClassRoots_);
        CheckAndFini<BaseStringTableImpl>(reinterpret_cast<BaseStringTableImpl*&>(stringTable_));
        PagePool::Instance().Fini();
    }

    VLOG(INFO, "Arkcommon runtime shutdown.");
    initialized_ = false;
}

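// Pre-fork hook for the appspawn process: request a synchronous full
// (compress) GC so the heap is compacted before fork(), likely to maximize
// page sharing with children, then stop the runtime threads, since only the
// forking thread survives fork().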
void BaseRuntime::PreFork(ThreadHolder *holder)
{
    // The appspawn fork needs a compacted heap: run a synchronous full compress GC.
    RequestGC(GC_REASON_APPSPAWN, false, GC_TYPE_FULL);
    {
        ThreadNativeScope scope(holder);
        HeapManager::StopRuntimeThreads();
    }
}

void BaseRuntime::PostFork()
{
    HeapManager::StartRuntimeThreads();
#ifdef ENABLE_COLD_STARTUP_GC_POLICY
    StartupStatusManager::OnAppStartup();
#endif
}

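// GC barrier entry points. The void* parameters are opaque object and field
// handles from the embedding VM; they are reinterpreted as BaseObject and
// RefField before being forwarded to the currently installed barrier.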
void BaseRuntime::WriteRoot(void *obj)
{
    Heap::GetBarrier().WriteRoot(reinterpret_cast<BaseObject *>(obj));
}

void BaseRuntime::WriteBarrier(void* obj, void* field, void* ref)
{
    DCHECK_CC(field != nullptr);
    Heap::GetBarrier().WriteBarrier(reinterpret_cast<BaseObject*>(obj),
        *reinterpret_cast<RefField<>*>(field), reinterpret_cast<BaseObject*>(ref));
}

void* BaseRuntime::ReadBarrier(void* obj, void* field)
{
    return reinterpret_cast<void*>(Heap::GetBarrier().ReadRefField(reinterpret_cast<BaseObject*>(obj),
        *reinterpret_cast<RefField<false>*>(field)));
}

void* BaseRuntime::ReadBarrier(void* field)
{
    return reinterpret_cast<void*>(Heap::GetBarrier().ReadStaticRef(*reinterpret_cast<RefField<false>*>(field)));
}

void* BaseRuntime::AtomicReadBarrier(void* obj, void* field, std::memory_order order)
{
    return reinterpret_cast<void*>(Heap::GetBarrier().AtomicReadRefField(reinterpret_cast<BaseObject*>(obj),
        *reinterpret_cast<RefField<true>*>(field), order));
}

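// Validates the requested reason and type against their enum ranges before
// handing the request to the heap manager.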
void BaseRuntime::RequestGC(GCReason reason, bool async, GCType gcType)
{
    if (reason < GC_REASON_BEGIN || reason > GC_REASON_END ||
        gcType < GC_TYPE_BEGIN || gcType > GC_TYPE_END) {
        VLOG(ERROR, "Invalid gc reason or gc type, gc reason: %s, gc type: %s",
            GCReasonToString(reason), GCTypeToString(gcType));
        return;
    }
    HeapManager::RequestGC(reason, async, gcType);
}

void BaseRuntime::WaitForGCFinish() { Heap::GetHeap().WaitForGCFinish(); }

void BaseRuntime::EnterGCCriticalSection() { Heap::GetHeap().MarkGCStart(); }
void BaseRuntime::ExitGCCriticalSection() { Heap::GetHeap().MarkGCFinish(); }

bool BaseRuntime::ForEachObj(HeapVisitor& visitor, bool safe)
{
    return Heap::GetHeap().ForEachObject(visitor, safe);
}

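// Native memory accounting hooks: the embedder reports off-heap allocations
// and frees so the GC can include them in its triggering heuristics.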
void BaseRuntime::NotifyNativeAllocation(size_t bytes)
{
    Heap::GetHeap().NotifyNativeAllocation(bytes);
}

void BaseRuntime::NotifyNativeFree(size_t bytes)
{
    Heap::GetHeap().NotifyNativeFree(bytes);
}

void BaseRuntime::NotifyNativeReset(size_t oldBytes, size_t newBytes)
{
    Heap::GetHeap().NotifyNativeReset(oldBytes, newBytes);
}

size_t BaseRuntime::GetNotifiedNativeSize()
{
    return Heap::GetHeap().GetNotifiedNativeSize();
}

void BaseRuntime::ChangeGCParams(bool isBackground)
{
    Heap::GetHeap().ChangeGCParams(isBackground);
}

bool BaseRuntime::CheckAndTriggerHintGC(MemoryReduceDegree degree)
{
    return Heap::GetHeap().CheckAndTriggerHintGC(degree);
}

void BaseRuntime::NotifyHighSensitive(bool isStart)
{
    Heap::GetHeap().NotifyHighSensitive(isStart);
}
} // namespace common