/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/heap_manager.h"
#include "runtime/mem/lock_config_helper.h"
#include "runtime/mem/internal_allocator-inl.h"

#include <string>

#include "libpandabase/mem/mmap_mem_pool-inl.h"
#include "libpandabase/mem/pool_manager.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/locks.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/runtime.h"
#include "runtime/mem/gc/epsilon/epsilon.h"
#include "runtime/mem/gc/epsilon-g1/epsilon-g1.h"
#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/mem/gc/g1/g1-gc.h"
#include "runtime/mem/gc/stw-gc/stw-gc.h"

namespace ark::mem {

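/**
 * @brief Initialize the heap manager for the given GC type: store the memory statistics and
 * internal allocator, run the GC-specific initialization, and decide whether TLAB-based
 * allocation will be used.
 */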
bool HeapManager::Initialize(GCType gcType, MTModeT multithreadingMode, bool useTlab, MemStatsType *memStats,
                             InternalAllocatorPtr internalAllocator, bool createPygoteSpace)
{
    trace::ScopedTrace scopedTrace("HeapManager::Initialize");
    bool ret = false;
    memStats_ = memStats;
    internalAllocator_ = internalAllocator;
    switch (gcType) {
        case GCType::EPSILON_GC: {
            ret = Initialize<GCType::EPSILON_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::EPSILON_G1_GC: {
            ret = Initialize<GCType::EPSILON_G1_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::STW_GC: {
            ret = Initialize<GCType::STW_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::GEN_GC: {
            ret = Initialize<GCType::GEN_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::G1_GC: {
            ret = Initialize<GCType::G1_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        default:
            LOG(FATAL, GC) << "Invalid init for gc_type = " << static_cast<int>(gcType);
            break;
    }
    // With the run-gc-every-safepoint option we want AOT/JIT/Irtoc code to use the common
    // allocation path so that TriggerGCIfNeeded can check the state of memory.
    if (!objectAllocator_.AsObjectAllocator()->IsTLABSupported() || Runtime::GetOptions().IsRunGcEverySafepoint()) {
        useTlab = false;
    }
    useTlabForAllocations_ = useTlab;
    if (useTlabForAllocations_ && Runtime::GetOptions().IsAdaptiveTlabSize()) {
        isAdaptiveTlabSize_ = true;
    }
    // Currently, the USE_TLAB_FOR_ALLOCATIONS option is supported only for generational GCs.
    ASSERT(IsGenerationalGCType(gcType) || (!useTlabForAllocations_));
    return ret;
}

void HeapManager::SetPandaVM(PandaVM *vm)
{
    vm_ = vm;
    gc_ = vm_->GetGC();
    notificationManager_ = Runtime::GetCurrent()->GetNotificationManager();
}

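/**
 * @brief Tear down the heap manager: destroy the code allocator, release all pools owned by
 * the object allocator back to the mmap memory pool, and destroy the object allocator itself.
 */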
bool HeapManager::Finalize()
{
    delete codeAllocator_;
    codeAllocator_ = nullptr;
    objectAllocator_->VisitAndRemoveAllPools(
        [](void *mem, [[maybe_unused]] size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete static_cast<Allocator *>(objectAllocator_);

    return true;
}

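/**
 * @brief Allocate and initialize a managed object of the given class and size. Triggers GC if
 * needed, falls back to GC-and-retry on allocation failure, and throws OutOfMemoryError
 * (returning nullptr) if memory still cannot be obtained. Finalizable objects are registered
 * and allocation listeners are notified.
 */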
ObjectHeader *HeapManager::AllocateObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
                                          ObjectAllocatorBase::ObjMemInitPolicy objInitType, bool pinned)
{
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    if (thread == nullptr) {
        // NOTE(dtrubenkov): try to avoid this
        thread = ManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
    }
    void *mem = AllocateMemoryForObject(size, align, thread, objInitType, pinned);
    if (UNLIKELY(mem == nullptr)) {
        mem = TryGCAndAlloc(size, align, thread, objInitType, pinned);
        if (UNLIKELY(mem == nullptr)) {
            ThrowOutOfMemoryError("AllocateObject failed");
            return nullptr;
        }
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc object at " << mem << " size: " << size;
    ObjectHeader *object = InitObjectHeaderAtMem(cls, mem);
    bool isObjectFinalizable = IsObjectFinalized(cls);
    if (UNLIKELY(isObjectFinalizable || GetNotificationManager()->HasAllocationListeners())) {
        // Use an object handle here as RegisterFinalizedObject can trigger GC
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handle(thread, object);
        RegisterFinalizedObject(handle.GetPtr(), cls, isObjectFinalizable);
        GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
        object = handle.GetPtr();
    }
    return object;
}

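/**
 * @brief Retry allocation after running GC. Generational collectors are first asked for young
 * GCs and only later for an OOM-cause GC; non-generational collectors always get the OOM cause.
 * The retry counter is reset as long as the last GC keeps freeing or moving memory.
 */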
void *HeapManager::TryGCAndAlloc(size_t size, Alignment align, ManagedThread *thread,
                                 ObjectAllocatorBase::ObjMemInitPolicy objInitType, bool pinned)
{
    // Do not retry too many times in OOM scenarios.
    constexpr size_t ALLOC_RETRY = 4;
    size_t allocTryCnt = 0;
    void *mem = nullptr;
    bool isGenerational = GetGC()->IsGenerational();
    ASSERT(!thread->HasPendingException());

    while (mem == nullptr && allocTryCnt < ALLOC_RETRY) {
        ++allocTryCnt;
        GCTaskCause cause;
        // NOTE: document why ALLOC_RETRY - 1 is used here
        if (allocTryCnt == ALLOC_RETRY - 1 || !isGenerational) {
            cause = GCTaskCause::OOM_CAUSE;
        } else {
            cause = GCTaskCause::YOUNG_GC_CAUSE;
        }
        GetGC()->WaitForGCInManaged(GCTask(cause));
        mem = AllocateMemoryForObject(size, align, thread, objInitType, pinned);
        if (mem != nullptr) {
            // An OOM error may have been set during GC; clear it because the last GC
            // succeeded and we managed to allocate memory.
            thread->ClearException();
        } else {
            auto freedBytes = GetPandaVM()->GetGCStats()->GetObjectsFreedBytes();
            auto lastYoungMovedBytes = GetPandaVM()->GetMemStats()->GetLastYoungObjectsMovedBytes();
            // If the last GC freed or moved some bytes from the young space, the VM is making
            // progress and this thread was simply unlucky to get memory. Reset allocTryCnt to try again.
            if (freedBytes + lastYoungMovedBytes != 0) {
                allocTryCnt = 0;
            }
        }
    }
    return mem;
}

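/**
 * @brief Allocate raw memory for an object. Small unpinned allocations go through the thread's
 * TLAB when TLABs are enabled, creating a new TLAB if the current one is sufficiently full;
 * otherwise the common object allocator path is used.
 */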
void *HeapManager::AllocateMemoryForObject(size_t size, Alignment align, ManagedThread *thread,
                                           ObjectAllocatorBase::ObjMemInitPolicy objInitType, bool pinned)
{
    void *mem = nullptr;
    if (UseTLABForAllocations() && size <= GetTLABMaxAllocSize() && !pinned) {
        ASSERT(thread != nullptr);
        ASSERT(GetGC()->IsTLABsSupported());
        // Try to allocate the object via the TLAB
        TLAB *currentTlab = thread->GetTLAB();
        ASSERT(currentTlab != nullptr);  // A thread's TLAB must be initialized at least with zero TLAB values.
        mem = currentTlab->Alloc(size);
        bool shouldAllocNewTlab =
            !isAdaptiveTlabSize_ || currentTlab->GetFillFraction() >= TLAB::MIN_DESIRED_FILL_FRACTION;
        if (mem == nullptr && shouldAllocNewTlab) {
            // We couldn't allocate the object via the current TLAB,
            // therefore create a new one and allocate in it.
            if (CreateNewTLAB(thread)) {
                currentTlab = thread->GetTLAB();
                mem = currentTlab->Alloc(size);
            }
        }
        if (PANDA_TRACK_TLAB_ALLOCATIONS && (mem != nullptr)) {
            memStats_->RecordAllocateObject(GetAlignedObjectSize(size), SpaceType::SPACE_TYPE_OBJECT);
        }
    }
    if (mem == nullptr) {  // If mem == nullptr, try the common allocation path
        mem = objectAllocator_.AsObjectAllocator()->Allocate(size, align, thread, objInitType, pinned);
    }
    return mem;
}

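/**
 * @brief Allocate an object in the non-movable space. On failure, runs an OOM-cause GC and
 * retries once; if allocation still fails, throws OutOfMemoryError (when a managed thread is
 * attached) and returns nullptr. IS_FIRST_CLASS_CLASS is true only when creating
 * ClassRoot::Class, in which case cls is null.
 */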
template <bool IS_FIRST_CLASS_CLASS>
ObjectHeader *HeapManager::AllocateNonMovableObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
                                                    ObjectAllocatorBase::ObjMemInitPolicy objInitType)
{
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    void *mem = objectAllocator_.AsObjectAllocator()->AllocateNonMovable(size, align, thread, objInitType);
    if (UNLIKELY(mem == nullptr)) {
        GCTaskCause cause = GCTaskCause::OOM_CAUSE;
        GetGC()->WaitForGCInManaged(GCTask(cause));
        mem = objectAllocator_.AsObjectAllocator()->AllocateNonMovable(size, align, thread, objInitType);
    }
    if (UNLIKELY(mem == nullptr)) {
        if (ManagedThread::GetCurrent() != nullptr) {
            ThrowOutOfMemoryError("AllocateNonMovableObject failed");
        }
        return nullptr;
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc non-movable object at " << mem << " size: " << size;
    auto *object = InitObjectHeaderAtMem(cls, mem);
    // cls can be null during first class creation, when we create ClassRoot::Class
    // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    if constexpr (IS_FIRST_CLASS_CLASS) {
        ASSERT(cls == nullptr);
        // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    } else {
        ASSERT(cls != nullptr);
        bool isObjectFinalizable = IsObjectFinalized(cls);
        if (UNLIKELY(isObjectFinalizable || GetNotificationManager()->HasAllocationListeners())) {
            if (thread == nullptr) {
                thread = ManagedThread::GetCurrent();
            }
            [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
            VMHandle<ObjectHeader> handle(thread, object);
            RegisterFinalizedObject(handle.GetPtr(), cls, isObjectFinalizable);
            GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
            object = handle.GetPtr();
        }
    }
    return object;
}

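/**
 * @brief Construct an ObjectHeader in already-zeroed memory: initialize the GC bits first and
 * only then set the class word, so that concurrent sweep never sees a half-initialized object.
 */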
ObjectHeader *HeapManager::InitObjectHeaderAtMem(BaseClass *cls, void *mem)
{
    ASSERT(mem != nullptr);
    ASSERT(GetGC()->IsMutatorAllowed());

    auto object = static_cast<ObjectHeader *>(mem);
    // We need zeroed memory here according to the ISA
    ASSERT(object->AtomicGetMark().GetValue() == 0);
    ASSERT(object->ClassAddr<BaseClass *>() == nullptr);
    // The order is crucial here: the class word must stay 0 to avoid a data race with concurrent sweep.
    // Otherwise we could remove a not yet initialized object.
    GetGC()->InitGCBits(object);
    object->SetClass(cls);
    return object;
}

void HeapManager::TriggerGCIfNeeded()
{
    vm_->GetGCTrigger()->TriggerGcIfNeeded(GetGC());
}

Frame *HeapManager::AllocateExtFrame(size_t size, size_t extSz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frameAllocator = GetCurrentStackFrameAllocator();
    return Frame::FromExt(frameAllocator->Alloc(size), extSz);
}

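/**
 * @brief Request a new TLAB for the thread. With the fixed policy the initial TLAB size is
 * used; with the adaptive policy the size is derived from the weighted average of past TLAB
 * occupancy. The old TLAB is registered in the memory statistics before the thread switches
 * to the new one.
 * @return true if a new TLAB was created and attached to the thread, false otherwise
 */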
bool HeapManager::CreateNewTLAB(ManagedThread *thread)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    ASSERT(thread != nullptr);
    size_t newTlabSize = 0;
    TLAB *oldTlab = thread->GetTLAB();
    ASSERT(oldTlab != nullptr);
    if (!isAdaptiveTlabSize_) {
        // Use the initial TLAB size
        newTlabSize = Runtime::GetOptions().GetInitTlabSize();
    } else {
        thread->GetWeightedTlabAverage()->StoreNewSample(oldTlab->GetOccupiedSize(), oldTlab->GetSize());
        newTlabSize = AlignUp(thread->GetWeightedTlabAverage()->GetLastCountedSumInSizeT(), DEFAULT_ALIGNMENT_IN_BYTES);
    }
    LOG(DEBUG, ALLOC) << "Allocating new tlab with size: " << newTlabSize;
    ASSERT(newTlabSize != 0);
    TLAB *newTlab = objectAllocator_.AsObjectAllocator()->CreateNewTLAB(newTlabSize);
    if (newTlab != nullptr) {
        RegisterTLAB(thread->GetTLAB());
        thread->UpdateTLAB(newTlab);
        return true;
    }
    return false;
}

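/**
 * @brief Account a retired TLAB's occupied bytes in the memory statistics when per-allocation
 * TLAB tracking is disabled (otherwise each TLAB allocation is recorded individually).
 */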
void HeapManager::RegisterTLAB(const TLAB *tlab)
{
    ASSERT(tlab != nullptr);
    if (!PANDA_TRACK_TLAB_ALLOCATIONS && (tlab->GetOccupiedSize() != 0)) {
        memStats_->RecordAllocateObject(tlab->GetOccupiedSize(), SpaceType::SPACE_TYPE_OBJECT);
    }
}

void HeapManager::FreeExtFrame(Frame *frame, size_t extSz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frameAllocator = GetCurrentStackFrameAllocator();
    frameAllocator->Free(Frame::ToExt(frame, extSz));
}

CodeAllocator *HeapManager::GetCodeAllocator() const
{
    return codeAllocator_;
}

InternalAllocatorPtr HeapManager::GetInternalAllocator()
{
    return internalAllocator_;
}

ObjectAllocatorPtr HeapManager::GetObjectAllocator() const
{
    return objectAllocator_;
}

StackFrameAllocator *HeapManager::GetCurrentStackFrameAllocator()
{
    return ManagedThread::GetCurrent()->GetStackFrameAllocator();
}

void HeapManager::PreZygoteFork()
{
    GetGC()->WaitForGCOnPygoteFork(GCTask(GCTaskCause::PYGOTE_FORK_CAUSE));
}

float HeapManager::GetTargetHeapUtilization() const
{
    return targetUtilization_;
}

void HeapManager::SetTargetHeapUtilization(float target)
{
    ASSERT_PRINT(target > 0.0F && target < 1.0F, "Target heap utilization should be in the range (0,1)");
    targetUtilization_ = target;
}

size_t HeapManager::GetTotalMemory() const
{
    return GetObjectAllocator().AsObjectAllocator()->GetHeapSpace()->GetHeapSize();
}

size_t HeapManager::GetFreeMemory() const
{
    return helpers::UnsignedDifference(GetTotalMemory(), vm_->GetMemStats()->GetFootprintHeap());
}

void HeapManager::ClampNewMaxHeapSize()
{
    objectAllocator_.AsObjectAllocator()->GetHeapSpace()->ClampCurrentMaxHeapSize();
}

/**
 * @brief Check whether the given object is an instance of the given class.
 * @param obj - ObjectHeader pointer
 * @param hClass - Class pointer
 * @param assignable - whether subclasses of hClass also count
 * @return true if obj is an instance of hClass, otherwise false
 */
static bool MatchesClass(const ObjectHeader *obj, const Class *hClass, bool assignable)
{
    if (assignable) {
        return obj->IsInstanceOf(hClass);
    }
    return obj->ClassAddr<Class>() == hClass;
}

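/**
 * @brief Count, for each class in classes, how many heap objects match it. All mutator threads
 * are suspended while the heap is iterated, and the results are written to the corresponding
 * entries of the caller-provided counts array.
 */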
void HeapManager::CountInstances(const PandaVector<Class *> &classes, bool assignable, uint64_t *counts)
{
    auto objectsChecker = [&](ObjectHeader *obj) {
        for (size_t i = 0; i < classes.size(); ++i) {
            if (classes[i] == nullptr) {
                continue;
            }
            if (MatchesClass(obj, classes[i], assignable)) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                ++counts[i];
            }
        }
    };
    {
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        ScopedChangeThreadStatus sts(thread, ThreadStatus::RUNNING);
        ScopedSuspendAllThreadsRunning ssatr(thread->GetVM()->GetRendezvous());
        GetObjectAllocator().AsObjectAllocator()->IterateOverObjects(objectsChecker);
    }
}

void HeapManager::SetIsFinalizableFunc(IsObjectFinalizebleFunc func)
{
    isObjectFinalizebleFunc_ = func;
}

void HeapManager::SetRegisterFinalizeReferenceFunc(RegisterFinalizeReferenceFunc func)
{
    registerFinalizeReferenceFunc_ = func;
}

bool HeapManager::IsObjectFinalized(BaseClass *cls)
{
    return isObjectFinalizebleFunc_ != nullptr && isObjectFinalizebleFunc_(cls);
}

void HeapManager::RegisterFinalizedObject(ObjectHeader *object, BaseClass *cls, bool isObjectFinalizable)
{
    if (isObjectFinalizable) {
        ASSERT(registerFinalizeReferenceFunc_ != nullptr);
        registerFinalizeReferenceFunc_(object, cls);
    }
}

template ObjectHeader *HeapManager::AllocateNonMovableObject<true>(BaseClass *cls, size_t size, Alignment align,
                                                                   ManagedThread *thread,
                                                                   ObjectAllocatorBase::ObjMemInitPolicy objInitType);

template ObjectHeader *HeapManager::AllocateNonMovableObject<false>(
    BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
    ObjectAllocatorBase::ObjMemInitPolicy objInitType);
}  // namespace ark::mem