/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/heap_manager.h"
#include "runtime/mem/lock_config_helper.h"
#include "runtime/mem/internal_allocator-inl.h"

#include <string>

#include "libpandabase/mem/mmap_mem_pool-inl.h"
#include "libpandabase/mem/pool_manager.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/locks.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/runtime.h"
#include "runtime/mem/gc/epsilon/epsilon.h"
#include "runtime/mem/gc/epsilon-g1/epsilon-g1.h"
#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/mem/gc/g1/g1-gc.h"
#include "runtime/mem/gc/stw-gc/stw-gc.h"
#include "runtime/mem/gc/cmc-gc-adapter/cmc-gc-adapter.h"

namespace ark::mem {

bool HeapManager::Initialize(GCType gcType, MTModeT multithreadingMode, bool useTlab, MemStatsType *memStats,
                             InternalAllocatorPtr internalAllocator, bool createPygoteSpace)
{
    trace::ScopedTrace scopedTrace("HeapManager::Initialize");
    bool ret = false;
    memStats_ = memStats;
    internalAllocator_ = internalAllocator;
    switch (gcType) {
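        // Each case delegates to the GC-specific Initialize<GCType> instantiation for the selected collector.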
        case GCType::EPSILON_GC: {
            ret = Initialize<GCType::EPSILON_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::EPSILON_G1_GC: {
            ret = Initialize<GCType::EPSILON_G1_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::STW_GC: {
            ret = Initialize<GCType::STW_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::GEN_GC: {
            ret = Initialize<GCType::GEN_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::G1_GC: {
            ret = Initialize<GCType::G1_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        case GCType::CMC_GC: {
            ret = Initialize<GCType::CMC_GC>(memStats, multithreadingMode, createPygoteSpace);
            break;
        }
        default:
            LOG(FATAL, GC) << "Invalid init for gc_type = " << static_cast<int>(gcType);
            break;
    }
    // With the run-gc-every-safepoint option we want AOT/JIT/Irtoc code to take the common allocation
    // path, so that TriggerGCIfNeeded can check the state of memory at every safepoint.
    if (!objectAllocator_.AsObjectAllocator()->IsTLABSupported() || Runtime::GetOptions().IsRunGcEverySafepoint()) {
        useTlab = false;
    }
    useTlabForAllocations_ = useTlab;
    if (useTlabForAllocations_ && Runtime::GetOptions().IsAdaptiveTlabSize()) {
        isAdaptiveTlabSize_ = true;
    }
    // For now, the USE_TLAB_FOR_ALLOCATIONS option is supported only for generational GCs
    ASSERT(IsGenerationalGCType(gcType) || (!useTlabForAllocations_));
    return ret;
}

void HeapManager::SetPandaVM(PandaVM *vm)
{
    vm_ = vm;
    gc_ = vm_->GetGC();
    notificationManager_ = Runtime::GetCurrent()->GetNotificationManager();
}

bool HeapManager::Finalize()
{
    delete codeAllocator_;
    codeAllocator_ = nullptr;
    objectAllocator_->VisitAndRemoveAllPools(
        [](void *mem, [[maybe_unused]] size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete static_cast<Allocator *>(objectAllocator_);

    return true;
}

ObjectHeader *HeapManager::AllocateObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
                                          ObjectAllocatorBase::ObjMemInitPolicy objInitType, bool pinned)
{
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    if (thread == nullptr) {
        // NOTE(dtrubenkov): try to avoid this
        thread = ManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
    }
    void *mem = AllocateMemoryForObject(size, align, thread, objInitType, pinned);
    if (UNLIKELY(mem == nullptr)) {
        mem = TryGCAndAlloc(size, align, thread, objInitType, pinned);
        if (UNLIKELY(mem == nullptr)) {
            ThrowOutOfMemoryError("AllocateObject failed");
            return nullptr;
        }
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc object at " << mem << " size: " << size << " cls: " << cls;
    ObjectHeader *object = InitObjectHeaderAtMem(cls, mem);
    bool isObjectFinalizable = IsObjectFinalized(cls);
    if (UNLIKELY(isObjectFinalizable || GetNotificationManager()->HasAllocationListeners())) {
        // Use object handle here as RegisterFinalizedObject can trigger GC
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handle(thread, object);
        RegisterFinalizedObject(handle.GetPtr(), cls, isObjectFinalizable);
        GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
        object = handle.GetPtr();
    }
    return object;
}

void *HeapManager::TryGCAndAlloc(size_t size, Alignment align, ManagedThread *thread,
                                 ObjectAllocatorBase::ObjMemInitPolicy objInitType, bool pinned)
{
    // Do not retry too many times in OOM scenarios.
    constexpr size_t ALLOC_RETRY = 4;
    size_t allocTryCnt = 0;
    void *mem = nullptr;
    bool isGenerational = GetGC()->IsGenerational();
    ASSERT(!thread->HasPendingException());

    while (mem == nullptr && allocTryCnt < ALLOC_RETRY) {
        ++allocTryCnt;
        GCTaskCause cause;
        // ALLOC_RETRY - 1: escalate to a full (OOM-cause) collection before the retries run out,
        // so that at least one more allocation attempt follows the full GC.
        if (allocTryCnt == ALLOC_RETRY - 1 || !isGenerational) {
            cause = GCTaskCause::OOM_CAUSE;
        } else {
            cause = GCTaskCause::YOUNG_GC_CAUSE;
        }
        GetGC()->WaitForGCInManaged(GCTask(cause));
        mem = AllocateMemoryForObject(size, align, thread, objInitType, pinned);
        if (mem != nullptr) {
            // A previous GC may have set a pending OOM exception; clear it, since a subsequent GC
            // succeeded and the memory was allocated
            thread->ClearException();
        } else {
            auto freedBytes = GetPandaVM()->GetGCStats()->GetObjectsFreedBytes();
            auto lastYoungMovedBytes = GetPandaVM()->GetMemStats()->GetLastYoungObjectsMovedBytes();
            // If the last GC freed or moved some bytes from the young space, the VM is making progress
            // and this thread was merely unlucky to get its memory. Reset allocTryCnt to try again.
            if (freedBytes + lastYoungMovedBytes != 0) {
                allocTryCnt = 0;
            }
        }
    }
    return mem;
}

void *HeapManager::AllocateMemoryForObject(size_t size, Alignment align, ManagedThread *thread,
                                           ObjectAllocatorBase::ObjMemInitPolicy objInitType, bool pinned)
{
    void *mem = nullptr;
    if (UseTLABForAllocations() && size <= GetTLABMaxAllocSize() && !pinned) {
        ASSERT(thread != nullptr);
        ASSERT(GetGC()->IsTLABsSupported());
        // Try to allocate an object via TLAB
        TLAB *currentTlab = thread->GetTLAB();
        ASSERT(currentTlab != nullptr);  // A thread's TLAB must be initialized at least via some ZERO tlab values.
        mem = currentTlab->Alloc(size);
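        // With adaptive TLAB sizing, request a fresh TLAB only once the current one is filled above the
        // desired fraction; otherwise fall through to the common allocator so its remaining space is kept.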
        bool shouldAllocNewTlab =
            !isAdaptiveTlabSize_ || currentTlab->GetFillFraction() >= TLAB::MIN_DESIRED_FILL_FRACTION;
        if (mem == nullptr && shouldAllocNewTlab) {
            // We couldn't allocate an object via the current TLAB,
            // so create a new one and allocate in it.
            if (CreateNewTLAB(thread)) {
                currentTlab = thread->GetTLAB();
                mem = currentTlab->Alloc(size);
            }
        }
        if (PANDA_TRACK_TLAB_ALLOCATIONS && (mem != nullptr)) {
            memStats_->RecordAllocateObject(GetAlignedObjectSize(size), SpaceType::SPACE_TYPE_OBJECT);
        }
    }
    if (mem == nullptr) {  // if mem == nullptr, try the common allocation path
        mem = objectAllocator_.AsObjectAllocator()->Allocate(size, align, thread, objInitType, pinned);
    }
    return mem;
}

template <bool IS_FIRST_CLASS_CLASS>
ObjectHeader *HeapManager::AllocateNonMovableObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
                                                    ObjectAllocatorBase::ObjMemInitPolicy objInitType)
{
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    void *mem = objectAllocator_.AsObjectAllocator()->AllocateNonMovable(size, align, thread, objInitType);
    if (UNLIKELY(mem == nullptr)) {
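        // The first attempt failed: run a full OOM-cause collection and retry the allocation once.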
        GCTaskCause cause = GCTaskCause::OOM_CAUSE;
        GetGC()->WaitForGCInManaged(GCTask(cause));
        mem = objectAllocator_.AsObjectAllocator()->AllocateNonMovable(size, align, thread, objInitType);
    }
    if (UNLIKELY(mem == nullptr)) {
        if (ManagedThread::GetCurrent() != nullptr) {
            ThrowOutOfMemoryError("AllocateNonMovableObject failed");
        }
        return nullptr;
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc non-movable object at " << mem << " size: " << size << " cls: " << cls;
    auto *object = InitObjectHeaderAtMem(cls, mem);
    // cls can be null only for the very first class creation, when ClassRoot::Class itself is created
    // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    if constexpr (IS_FIRST_CLASS_CLASS) {
        ASSERT(cls == nullptr);
        // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    } else {
        ASSERT(cls != nullptr);
        bool isObjectFinalizable = IsObjectFinalized(cls);
        if (UNLIKELY(isObjectFinalizable || GetNotificationManager()->HasAllocationListeners())) {
            if (thread == nullptr) {
                thread = ManagedThread::GetCurrent();
            }
            [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
            VMHandle<ObjectHeader> handle(thread, object);
            RegisterFinalizedObject(handle.GetPtr(), cls, isObjectFinalizable);
            GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
            object = handle.GetPtr();
        }
    }
    return object;
}

ObjectHeader *HeapManager::InitObjectHeaderAtMem(BaseClass *cls, void *mem)
{
    ASSERT(mem != nullptr);
    ASSERT(GetGC()->IsMutatorAllowed());

    auto object = static_cast<ObjectHeader *>(mem);
    // We need zeroed memory here, as required by the ISA
    ASSERT(object->AtomicGetMark().GetValue() == 0);
    ASSERT(object->ClassAddr<BaseClass *>() == nullptr);
    // The order is crucial here: the class word must stay zero until the GC bits are initialized,
    // to avoid a data race with concurrent sweep. Otherwise the collector could reclaim a
    // not-yet-initialized object.
    GetGC()->InitGCBits(object);
    object->SetClass(cls);
    return object;
}

void HeapManager::TriggerGCIfNeeded()
{
    vm_->GetGCTrigger()->TriggerGcIfNeeded(GetGC());
}

Frame *HeapManager::AllocateExtFrame(size_t size, size_t extSz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frameAllocator = GetCurrentStackFrameAllocator();
    return Frame::FromExt(frameAllocator->Alloc(size), extSz);
}

bool HeapManager::CreateNewTLAB(ManagedThread *thread)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    ASSERT(thread != nullptr);
    size_t newTlabSize = 0;
    TLAB *oldTlab = thread->GetTLAB();
    ASSERT(oldTlab != nullptr);
    if (!isAdaptiveTlabSize_) {
        // Use the initial TLAB size from the runtime options
        newTlabSize = Runtime::GetOptions().GetInitTlabSize();
    } else {
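        // Adaptive sizing: record the occupancy of the retiring TLAB as a new sample and derive the
        // next TLAB size from the weighted average of past samples.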
        thread->GetWeightedTlabAverage()->StoreNewSample(oldTlab->GetOccupiedSize(), oldTlab->GetSize());
        newTlabSize = AlignUp(thread->GetWeightedTlabAverage()->GetLastCountedSumInSizeT(), DEFAULT_ALIGNMENT_IN_BYTES);
    }
    LOG(DEBUG, ALLOC) << "Allocating new tlab with size: " << newTlabSize;
    ASSERT(newTlabSize != 0);
    TLAB *newTlab = objectAllocator_.AsObjectAllocator()->CreateNewTLAB(newTlabSize);
    if (newTlab != nullptr) {
        RegisterTLAB(thread->GetTLAB());
        thread->UpdateTLAB(newTlab);
        return true;
    }
    return false;
}

void HeapManager::RegisterTLAB(const TLAB *tlab)
{
    ASSERT(tlab != nullptr);
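    // When per-allocation TLAB tracking is disabled, account the whole occupied size of the
    // retiring TLAB in one step here.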
    if (!PANDA_TRACK_TLAB_ALLOCATIONS && (tlab->GetOccupiedSize() != 0)) {
        memStats_->RecordAllocateObject(tlab->GetOccupiedSize(), SpaceType::SPACE_TYPE_OBJECT);
    }
}

void HeapManager::FreeExtFrame(Frame *frame, size_t extSz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frameAllocator = GetCurrentStackFrameAllocator();
    frameAllocator->Free(Frame::ToExt(frame, extSz));
}

CodeAllocator *HeapManager::GetCodeAllocator() const
{
    return codeAllocator_;
}

InternalAllocatorPtr HeapManager::GetInternalAllocator()
{
    return internalAllocator_;
}

ObjectAllocatorPtr HeapManager::GetObjectAllocator() const
{
    return objectAllocator_;
}

StackFrameAllocator *HeapManager::GetCurrentStackFrameAllocator()
{
    return ManagedThread::GetCurrent()->GetStackFrameAllocator();
}

void HeapManager::PreZygoteFork()
{
    GetGC()->WaitForGCOnPygoteFork(GCTask(GCTaskCause::PYGOTE_FORK_CAUSE));
}

float HeapManager::GetTargetHeapUtilization() const
{
    return targetUtilization_;
}

void HeapManager::SetTargetHeapUtilization(float target)
{
    ASSERT_PRINT(target > 0.0F && target < 1.0F, "Target heap utilization should be in the range (0, 1)");
    targetUtilization_ = target;
}

size_t HeapManager::GetTotalMemory() const
{
    return GetObjectAllocator().AsObjectAllocator()->GetHeapSpace()->GetHeapSize();
}

size_t HeapManager::GetFreeMemory() const
{
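    // UnsignedDifference guards against underflow if the heap footprint temporarily exceeds the heap size.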
    return helpers::UnsignedDifference(GetTotalMemory(), vm_->GetMemStats()->GetFootprintHeap());
}

void HeapManager::ClampNewMaxHeapSize()
{
    objectAllocator_.AsObjectAllocator()->GetHeapSpace()->ClampCurrentMaxHeapSize();
}

/**
 * @brief Check whether the given object is an instance of the given class.
 * @param obj - ObjectHeader pointer
 * @param hClass - Class pointer
 * @param assignable - whether subclasses of hClass also match
 * @return true if obj matches hClass, otherwise false
 */
static bool MatchesClass(const ObjectHeader *obj, const Class *hClass, bool assignable)
{
    if (assignable) {
        return obj->IsInstanceOf(hClass);
    }
    return obj->ClassAddr<Class>() == hClass;
}

void HeapManager::CountInstances(const PandaVector<Class *> &classes, bool assignable, uint64_t *counts)
{
    auto objectsChecker = [&](ObjectHeader *obj) {
        for (size_t i = 0; i < classes.size(); ++i) {
            if (classes[i] == nullptr) {
                continue;
            }
            if (MatchesClass(obj, classes[i], assignable)) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                ++counts[i];
            }
        }
    };
    {
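        // Suspend all other threads so that the iteration sees a stable snapshot of the heap.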
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        ScopedChangeThreadStatus ets(thread, ThreadStatus::RUNNING);
        ScopedSuspendAllThreadsRunning ssatr(thread->GetVM()->GetRendezvous());
        GetObjectAllocator().AsObjectAllocator()->IterateOverObjects(objectsChecker);
    }
}

void HeapManager::SetIsFinalizableFunc(IsObjectFinalizebleFunc func)
{
    isObjectFinalizebleFunc_ = func;
}

void HeapManager::SetRegisterFinalizeReferenceFunc(RegisterFinalizeReferenceFunc func)
{
    registerFinalizeReferenceFunc_ = func;
}

bool HeapManager::IsObjectFinalized(BaseClass *cls)
{
    return isObjectFinalizebleFunc_ != nullptr && isObjectFinalizebleFunc_(cls);
}

void HeapManager::RegisterFinalizedObject(ObjectHeader *object, BaseClass *cls, bool isObjectFinalizable)
{
    if (isObjectFinalizable) {
        ASSERT(registerFinalizeReferenceFunc_ != nullptr);
        registerFinalizeReferenceFunc_(object, cls);
    }
}

template ObjectHeader *HeapManager::AllocateNonMovableObject<true>(BaseClass *cls, size_t size, Alignment align,
                                                                   ManagedThread *thread,
                                                                   ObjectAllocatorBase::ObjMemInitPolicy objInitType);

template ObjectHeader *HeapManager::AllocateNonMovableObject<false>(
    BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
    ObjectAllocatorBase::ObjMemInitPolicy objInitType);
}  // namespace ark::mem