/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap_manager.h"
#include "mem/pool_manager.h"
#include "mem/mmap_mem_pool-inl.h"
#include "mem/internal_allocator-inl.h"
#include "include/runtime.h"
#include "include/locks.h"

#include <string>

#include "libpandabase/mem/mmap_mem_pool-inl.h"
#include "libpandabase/mem/pool_manager.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/gc/epsilon/epsilon.h"
#include "runtime/mem/gc/epsilon-g1/epsilon-g1.h"
#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/mem/gc/g1/g1-gc.h"
#include "runtime/mem/gc/stw-gc/stw-gc.h"

namespace panda::mem {

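/**
 * @brief Create the object allocator and GC-dependent heap structures for @p gcType.
 * Dispatches to the templated Initialize<GCType, MTMode> specialization via the
 * FWD_GC_INIT macro below, in single- or multi-threaded mode.
 */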
bool HeapManager::Initialize(GCType gcType, bool singleThreaded, bool useTlab, MemStatsType *memStats,
                             InternalAllocatorPtr internalAllocator, bool createPygoteSpace)
{
    trace::ScopedTrace scopedTrace("HeapManager::Initialize");
    bool ret = false;
    memStats_ = memStats;
    internalAllocator_ = internalAllocator;
    // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FWD_GC_INIT(type, mem_stats)                                              \
    case type:                                                                    \
        if (singleThreaded) {                                                     \
            ret = Initialize<type, MT_MODE_SINGLE>(mem_stats, createPygoteSpace); \
        } else {                                                                  \
            ret = Initialize<type, MT_MODE_MULTI>(mem_stats, createPygoteSpace);  \
        }                                                                         \
        break

    switch (gcType) {
        FWD_GC_INIT(GCType::EPSILON_GC, memStats);
        FWD_GC_INIT(GCType::EPSILON_G1_GC, memStats);
        FWD_GC_INIT(GCType::STW_GC, memStats);
        FWD_GC_INIT(GCType::GEN_GC, memStats);
        FWD_GC_INIT(GCType::G1_GC, memStats);
        default:
            LOG(FATAL, GC) << "Invalid init for gc_type = " << static_cast<int>(gcType);
            break;
    }
#undef FWD_GC_INIT
    // With the run-gc-every-safepoint option we want AOT/JIT/Irtoc code to take the common
    // allocation path, so that TriggerGCIfNeeded can check the state of memory.
    if (!objectAllocator_.AsObjectAllocator()->IsTLABSupported() || Runtime::GetOptions().IsRunGcEverySafepoint()) {
        useTlab = false;
    }
    useTlabForAllocations_ = useTlab;
    // For now, the USE_TLAB_FOR_ALLOCATIONS option is supported only for generational GCs.
    ASSERT(IsGenerationalGCType(gcType) || (!useTlabForAllocations_));
    return ret;
}

void HeapManager::SetPandaVM(PandaVM *vm)
{
    vm_ = vm;
    gc_ = vm_->GetGC();
    notificationManager_ = Runtime::GetCurrent()->GetNotificationManager();
}

bool HeapManager::Finalize()
{
    delete codeAllocator_;
    codeAllocator_ = nullptr;
    objectAllocator_->VisitAndRemoveAllPools(
        [](void *mem, [[maybe_unused]] size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete static_cast<Allocator *>(objectAllocator_);

    return true;
}

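/**
 * @brief Allocate and initialize a regular (movable) object of class @p cls.
 * Triggers a GC beforehand if the trigger policy requests one. If the first
 * allocation attempt fails, falls back to TryGCAndAlloc; on final failure an
 * OutOfMemoryError is thrown and nullptr is returned.
 */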
ObjectHeader *HeapManager::AllocateObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
                                          ObjectAllocatorBase::ObjMemInitPolicy objInitType)
{
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    if (thread == nullptr) {
        // NOTE(dtrubenkov): try to avoid this
        thread = ManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
    }
    void *mem = AllocateMemoryForObject(size, align, thread, objInitType);
    if (UNLIKELY(mem == nullptr)) {
        mem = TryGCAndAlloc(size, align, thread, objInitType);
        if (UNLIKELY(mem == nullptr)) {
            ThrowOutOfMemoryError("AllocateObject failed");
            return nullptr;
        }
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc object at " << mem << " size: " << size;
    ObjectHeader *object = InitObjectHeaderAtMem(cls, mem);
    bool isObjectFinalizable = IsObjectFinalized(cls);
    if (UNLIKELY(isObjectFinalizable || GetNotificationManager()->HasAllocationListeners())) {
        // Use an object handle here, as RegisterFinalizedObject can trigger GC.
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handle(thread, object);
        RegisterFinalizedObject(handle.GetPtr(), cls, isObjectFinalizable);
        GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
        object = handle.GetPtr();
    }
    return object;
}

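/**
 * @brief Retry allocation under GC pressure, running a GC before each attempt.
 * Up to ALLOC_RETRY attempts are made; the retry counter is reset whenever the
 * last GC freed or moved memory, since that indicates the VM is still making progress.
 */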
void *HeapManager::TryGCAndAlloc(size_t size, Alignment align, ManagedThread *thread,
                                 ObjectAllocatorBase::ObjMemInitPolicy objInitType)
{
    // Do not retry too many times in OOM scenarios.
    constexpr size_t ALLOC_RETRY = 4;
    size_t allocTryCnt = 0;
    void *mem = nullptr;
    bool isGenerational = GetGC()->IsGenerational();
    ASSERT(!thread->HasPendingException());

    while (mem == nullptr && allocTryCnt < ALLOC_RETRY) {
        ++allocTryCnt;
        GCTaskCause cause;
        // On the penultimate attempt (ALLOC_RETRY - 1), or for non-generational GCs,
        // request a full GC with OOM cause; otherwise a cheaper young-space GC is enough.
        if (allocTryCnt == ALLOC_RETRY - 1 || !isGenerational) {
            cause = GCTaskCause::OOM_CAUSE;
        } else {
            cause = GCTaskCause::YOUNG_GC_CAUSE;
        }
        GetGC()->WaitForGCInManaged(GCTask(cause));
        mem = AllocateMemoryForObject(size, align, thread, objInitType);
        if (mem != nullptr) {
            // The GC could have set an OOM exception; clear it, since the latest GC
            // succeeded and we managed to allocate memory.
            thread->ClearException();
        } else {
            auto freedBytes = GetPandaVM()->GetGCStats()->GetObjectsFreedBytes();
            auto lastYoungMovedBytes = GetPandaVM()->GetMemStats()->GetLastYoungObjectsMovedBytes();
            // If the last GC freed some bytes or moved some out of the young space, the VM is
            // making progress and this thread was merely unlucky to get memory.
            // Reset allocTryCnt to try again.
            if (freedBytes + lastYoungMovedBytes != 0) {
                allocTryCnt = 0;
            }
        }
    }
    return mem;
}

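/**
 * @brief Fast-path memory allocation: bump-allocate from the thread's TLAB when
 * TLABs are enabled and the size fits, refilling the TLAB once on failure;
 * otherwise fall back to the common object allocator path.
 */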
void *HeapManager::AllocateMemoryForObject(size_t size, Alignment align, ManagedThread *thread,
                                           ObjectAllocatorBase::ObjMemInitPolicy objInitType)
{
    void *mem = nullptr;
    if (UseTLABForAllocations() && size <= GetTLABMaxAllocSize()) {
        ASSERT(thread != nullptr);
        ASSERT(GetGC()->IsTLABsSupported());
        // Try to allocate the object via the TLAB.
        TLAB *currentTlab = thread->GetTLAB();
        ASSERT(currentTlab != nullptr);  // A thread's TLAB must be initialized, at least with zero TLAB values.
        mem = currentTlab->Alloc(size);
        if (mem == nullptr) {
            // We could not allocate the object via the current TLAB;
            // therefore, create a new one and allocate in it.
            if (CreateNewTLAB(thread)) {
                currentTlab = thread->GetTLAB();
                mem = currentTlab->Alloc(size);
            }
        }
        if (PANDA_TRACK_TLAB_ALLOCATIONS && (mem != nullptr)) {
            memStats_->RecordAllocateObject(GetAlignedObjectSize(size), SpaceType::SPACE_TYPE_OBJECT);
        }
    }
    if (mem == nullptr) {  // If mem == nullptr, try the common allocation path.
        mem = objectAllocator_.AsObjectAllocator()->Allocate(size, align, thread, objInitType);
    }
    return mem;
}

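/**
 * @brief Allocate and initialize a non-movable object of class @p cls.
 * On allocation failure, a full GC with OOM cause is run once before retrying.
 * @tparam IS_FIRST_CLASS_CLASS true only when creating ClassRoot::Class itself,
 * in which case @p cls must be nullptr.
 */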
template <bool IS_FIRST_CLASS_CLASS>
ObjectHeader *HeapManager::AllocateNonMovableObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
                                                    ObjectAllocatorBase::ObjMemInitPolicy objInitType)
{
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    void *mem = objectAllocator_.AsObjectAllocator()->AllocateNonMovable(size, align, thread, objInitType);
    if (UNLIKELY(mem == nullptr)) {
        GCTaskCause cause = GCTaskCause::OOM_CAUSE;
        GetGC()->WaitForGCInManaged(GCTask(cause));
        mem = objectAllocator_.AsObjectAllocator()->AllocateNonMovable(size, align, thread, objInitType);
    }
    if (UNLIKELY(mem == nullptr)) {
        if (ManagedThread::GetCurrent() != nullptr) {
            ThrowOutOfMemoryError("AllocateNonMovableObject failed");
        }
        return nullptr;
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc non-movable object at " << mem << " size: " << size;
    auto *object = InitObjectHeaderAtMem(cls, mem);
    // cls can be null for the first class creation, when we create ClassRoot::Class.
    // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    if constexpr (IS_FIRST_CLASS_CLASS) {
        ASSERT(cls == nullptr);
        // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    } else {
        ASSERT(cls != nullptr);
        bool isObjectFinalizable = IsObjectFinalized(cls);
        if (UNLIKELY(isObjectFinalizable || GetNotificationManager()->HasAllocationListeners())) {
            if (thread == nullptr) {
                thread = ManagedThread::GetCurrent();
            }
            [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
            VMHandle<ObjectHeader> handle(thread, object);
            RegisterFinalizedObject(handle.GetPtr(), cls, isObjectFinalizable);
            GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
            object = handle.GetPtr();
        }
    }
    return object;
}

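/**
 * @brief Install the object header (GC bits, then the class word) at @p mem.
 * The memory must already be zeroed; the class word is written last so that a
 * concurrent sweep never observes a partially initialized object.
 */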
ObjectHeader *HeapManager::InitObjectHeaderAtMem(BaseClass *cls, void *mem)
{
    ASSERT(mem != nullptr);
    ASSERT(GetGC()->IsMutatorAllowed());

    auto object = static_cast<ObjectHeader *>(mem);
    // We need zeroed memory here, according to the ISA.
    ASSERT(object->AtomicGetMark().GetValue() == 0);
    ASSERT(object->ClassAddr<BaseClass *>() == nullptr);
    // The order is crucial here: the class word must stay 0 until the end to avoid a data
    // race with concurrent sweep. Otherwise we could sweep a not-yet-initialized object.
    GetGC()->InitGCBits(object);
    object->SetClass(cls);
    return object;
}

void HeapManager::TriggerGCIfNeeded()
{
    vm_->GetGCTrigger()->TriggerGcIfNeeded(GetGC());
}

Frame *HeapManager::AllocateExtFrame(size_t size, size_t extSz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frameAllocator = GetCurrentStackFrameAllocator();
    return Frame::FromExt(frameAllocator->Alloc(size), extSz);
}

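/**
 * @brief Replace the thread's current TLAB with a freshly allocated one.
 * The old TLAB is registered first so its occupied bytes are accounted in mem stats.
 * @return true if a new TLAB was allocated, false otherwise.
 */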
bool HeapManager::CreateNewTLAB(ManagedThread *thread)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    ASSERT(thread != nullptr);
    TLAB *newTlab = objectAllocator_.AsObjectAllocator()->CreateNewTLAB(thread);
    if (newTlab != nullptr) {
        RegisterTLAB(thread->GetTLAB());
        thread->UpdateTLAB(newTlab);
        return true;
    }
    return false;
}

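/**
 * @brief Record a retired TLAB's occupied bytes in mem stats. This is only needed when
 * PANDA_TRACK_TLAB_ALLOCATIONS is off, i.e. when individual TLAB allocations are not tracked.
 */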
void HeapManager::RegisterTLAB(const TLAB *tlab)
{
    ASSERT(tlab != nullptr);
    if (!PANDA_TRACK_TLAB_ALLOCATIONS && (tlab->GetOccupiedSize() != 0)) {
        memStats_->RecordAllocateObject(tlab->GetOccupiedSize(), SpaceType::SPACE_TYPE_OBJECT);
    }
}

void HeapManager::FreeExtFrame(Frame *frame, size_t extSz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frameAllocator = GetCurrentStackFrameAllocator();
    frameAllocator->Free(Frame::ToExt(frame, extSz));
}

CodeAllocator *HeapManager::GetCodeAllocator() const
{
    return codeAllocator_;
}

InternalAllocatorPtr HeapManager::GetInternalAllocator()
{
    return internalAllocator_;
}

ObjectAllocatorPtr HeapManager::GetObjectAllocator()
{
    return objectAllocator_;
}

StackFrameAllocator *HeapManager::GetCurrentStackFrameAllocator()
{
    return ManagedThread::GetCurrent()->GetStackFrameAllocator();
}

void HeapManager::PreZygoteFork()
{
    GetGC()->WaitForGCOnPygoteFork(GCTask(GCTaskCause::PYGOTE_FORK_CAUSE));
}

float HeapManager::GetTargetHeapUtilization() const
{
    return targetUtilization_;
}

void HeapManager::SetTargetHeapUtilization(float target)
{
    ASSERT_PRINT(target > 0.0F && target < 1.0F, "Target heap utilization should be in the range (0, 1)");
    targetUtilization_ = target;
}

size_t HeapManager::GetTotalMemory() const
{
    return vm_->GetMemStats()->GetFootprintHeap();
}

size_t HeapManager::GetFreeMemory() const
{
    return helpers::UnsignedDifference(GetTotalMemory(), vm_->GetMemStats()->GetFootprintHeap());
}

void HeapManager::ClampNewMaxHeapSize()
{
    objectAllocator_.AsObjectAllocator()->GetHeapSpace()->ClampCurrentMaxHeapSize();
}

/**
 * @brief Check whether the given object is an instance of the given class.
 * @param obj - ObjectHeader pointer
 * @param hClass - Class pointer
 * @param assignable - whether subclasses of hClass also count
 * @return true if obj is an instance of hClass, otherwise false
 */
static bool MatchesClass(const ObjectHeader *obj, const Class *hClass, bool assignable)
{
    if (assignable) {
        return obj->IsInstanceOf(hClass);
    }
    return obj->ClassAddr<Class>() == hClass;
}

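/**
 * @brief Count heap instances of each class in @p classes, writing the result for
 * classes[i] into counts[i]. All mutator threads are suspended while the heap is
 * iterated. A sketch of a possible call site (the class variables are illustrative,
 * not from this codebase):
 * @code
 *   PandaVector<Class *> classes {stringClass, arrayClass};
 *   uint64_t counts[2] = {0U, 0U};
 *   heapManager->CountInstances(classes, true, counts);  // assignable = true
 * @endcode
 */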
void HeapManager::CountInstances(const PandaVector<Class *> &classes, bool assignable, uint64_t *counts)
{
    auto objectsChecker = [&](ObjectHeader *obj) {
        for (size_t i = 0; i < classes.size(); ++i) {
            if (classes[i] == nullptr) {
                continue;
            }
            if (MatchesClass(obj, classes[i], assignable)) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                ++counts[i];
            }
        }
    };
    {
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        ScopedChangeThreadStatus sts(thread, ThreadStatus::RUNNING);
        ScopedSuspendAllThreadsRunning ssatr(thread->GetVM()->GetRendezvous());
        GetObjectAllocator().AsObjectAllocator()->IterateOverObjects(objectsChecker);
    }
}

void HeapManager::SetIsFinalizableFunc(IsObjectFinalizebleFunc func)
{
    isObjectFinalizebleFunc_ = func;
}

void HeapManager::SetRegisterFinalizeReferenceFunc(RegisterFinalizeReferenceFunc func)
{
    registerFinalizeReferenceFunc_ = func;
}

bool HeapManager::IsObjectFinalized(BaseClass *cls)
{
    return isObjectFinalizebleFunc_ != nullptr && isObjectFinalizebleFunc_(cls);
}

void HeapManager::RegisterFinalizedObject(ObjectHeader *object, BaseClass *cls, bool isObjectFinalizable)
{
    if (isObjectFinalizable) {
        ASSERT(registerFinalizeReferenceFunc_ != nullptr);
        registerFinalizeReferenceFunc_(object, cls);
    }
}

template ObjectHeader *HeapManager::AllocateNonMovableObject<true>(BaseClass *cls, size_t size, Alignment align,
                                                                   ManagedThread *thread,
                                                                   ObjectAllocatorBase::ObjMemInitPolicy objInitType);

template ObjectHeader *HeapManager::AllocateNonMovableObject<false>(
    BaseClass *cls, size_t size, Alignment align, ManagedThread *thread,
    ObjectAllocatorBase::ObjMemInitPolicy objInitType);
}  // namespace panda::mem