/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap_manager.h"
#include "mem/gc/hybrid-gc/hybrid_object_allocator.h"
#include "mem/pool_manager.h"
#include "mem/mmap_mem_pool-inl.h"
#include "mem/internal_allocator-inl.h"
#include "include/runtime.h"
#include "include/locks.h"

#include <string>

#include "libpandabase/mem/mmap_mem_pool-inl.h"
#include "libpandabase/mem/pool_manager.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/gc/g1/g1-gc.h"

namespace panda::mem {

bool HeapManager::Initialize(GCType gc_type, bool single_threaded, bool use_tlab, MemStatsType *mem_stats,
                             InternalAllocatorPtr internal_allocator, bool create_pygote_space)
{
    trace::ScopedTrace scoped_trace("HeapManager::Initialize");
    bool ret = false;
    mem_stats_ = mem_stats;
    internalAllocator_ = internal_allocator;
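    // FWD_GC_INIT expands to a switch case that forwards to the templated
    // Initialize<GCType, MTMode> overload, choosing single- or multi-threaded
    // mode based on the single_threaded flag.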
    // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FWD_GC_INIT(type, mem_stats)                                                \
    case type:                                                                      \
        if (single_threaded) {                                                      \
            ret = Initialize<type, MT_MODE_SINGLE>(mem_stats, create_pygote_space); \
        } else {                                                                    \
            ret = Initialize<type, MT_MODE_MULTI>(mem_stats, create_pygote_space);  \
        }                                                                           \
        break

    switch (gc_type) {
        FWD_GC_INIT(GCType::EPSILON_GC, mem_stats);
        FWD_GC_INIT(GCType::STW_GC, mem_stats);
        FWD_GC_INIT(GCType::GEN_GC, mem_stats);
        FWD_GC_INIT(GCType::HYBRID_GC, mem_stats);
        FWD_GC_INIT(GCType::G1_GC, mem_stats);
        default:
            LOG(FATAL, GC) << "Invalid init for gc_type = " << static_cast<int>(gc_type);
            break;
    }
#undef FWD_GC_INIT
    if (!objectAllocator_.AsObjectAllocator()->IsTLABSupported() || single_threaded) {
        use_tlab = false;
    }
    use_tlab_for_allocations_ = use_tlab;
    // For now, the USE_TLAB_FOR_ALLOCATIONS option is supported only for generational GCs
    ASSERT(IsGenerationalGCType(gc_type) || (!use_tlab_for_allocations_));
    return ret;
}

void HeapManager::SetPandaVM(PandaVM *vm)
{
    vm_ = vm;
    gc_ = vm_->GetGC();
    notification_manager_ = Runtime::GetCurrent()->GetNotificationManager();
}

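// Releases all heap resources: the code allocator, every pool owned by the
// object allocator (returned to the mmap pool), and the object allocator itself.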
bool HeapManager::Finalize()
{
    delete codeAllocator_;
    objectAllocator_->VisitAndRemoveAllPools(
        [](void *mem, [[maybe_unused]] size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete static_cast<Allocator *>(objectAllocator_);

    return true;
}

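// Main allocation entry point: allocate, and on failure let TryGCAndAlloc run
// garbage collection(s) and retry before throwing OutOfMemoryError.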
ObjectHeader *HeapManager::AllocateObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    if (thread == nullptr) {
        // TODO(dtrubenkov): try to avoid this
        thread = ManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
    }
    void *mem = AllocateMemoryForObject(size, align, thread);
    if (UNLIKELY(mem == nullptr)) {
        mem = TryGCAndAlloc(size, align, thread);
        if (UNLIKELY(mem == nullptr)) {
            ThrowOutOfMemoryError("AllocateObject failed");
            return nullptr;
        }
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc object at " << mem << " size: " << size;
    ObjectHeader *object = InitObjectHeaderAtMem(cls, mem);
    bool is_object_finalizable = IsObjectFinalized(cls);
    if (UNLIKELY(is_object_finalizable || GetNotificationManager()->HasAllocationListeners())) {
        // Use an object handle here, as RegisterFinalizedObject can trigger GC
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handle(thread, object);
        RegisterFinalizedObject(handle.GetPtr(), cls, is_object_finalizable);
        GetNotificationManager()->ObjectAllocEvent(cls, handle.GetPtr(), thread, size);
        object = handle.GetPtr();
    }
    return object;
}

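// Slow path after a failed allocation: run GC and retry up to ALLOC_RETRY times,
// escalating from a young-space collection to a full OOM-cause collection.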
void *HeapManager::TryGCAndAlloc(size_t size, Alignment align, ManagedThread *thread)
{
    // Do not retry too many times in OOM scenarios.
    constexpr size_t ALLOC_RETRY = 4;
    size_t alloc_try_cnt = 0;
    void *mem = nullptr;
    bool is_generational = GetGC()->IsGenerational();
    ASSERT(!thread->HasPendingException());

    while (mem == nullptr && alloc_try_cnt < ALLOC_RETRY) {
        ++alloc_try_cnt;
        GCTaskCause cause;
        // On the next-to-last attempt (hence ALLOC_RETRY - 1), or when the GC is not
        // generational, escalate to a full OOM-cause collection instead of a young one
        if (alloc_try_cnt == ALLOC_RETRY - 1 || !is_generational) {
            cause = GCTaskCause::OOM_CAUSE;
        } else {
            cause = GCTaskCause::YOUNG_GC_CAUSE;
        }
        GetGC()->WaitForGCInManaged(GCTask(cause, thread));
        mem = AllocateMemoryForObject(size, align, thread);
        if (mem != nullptr) {
            // A GC could have set an OOM error on the thread; clear it, since the
            // last GC succeeded and the allocation went through
            thread->ClearException();
        } else {
            auto freed_bytes = GetPandaVM()->GetGCStats()->GetObjectsFreedBytes();
            auto last_young_moved_bytes = GetPandaVM()->GetMemStats()->GetLastYoungObjectsMovedBytes();
            // If the last GC freed or moved some bytes out of the young space, the VM is
            // making progress; this thread was just unlucky to get memory.
            // Reset alloc_try_cnt to try again.
            if (freed_bytes + last_young_moved_bytes != 0) {
                alloc_try_cnt = 0;
            }
        }
    }
    return mem;
}

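// Fast path: bump-pointer allocation from the thread-local buffer (TLAB),
// falling back to the shared object allocator when a TLAB does not apply.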
void *HeapManager::AllocateMemoryForObject(size_t size, Alignment align, ManagedThread *thread)
{
    void *mem = nullptr;
    if (UseTLABForAllocations() && size <= GetTLABMaxAllocSize()) {
        ASSERT(thread != nullptr);
        ASSERT(GetGC()->IsTLABsSupported());
        // Try to allocate the object via the TLAB
        TLAB *current_tlab = thread->GetTLAB();
        ASSERT(current_tlab != nullptr);  // A thread's TLAB is always set, at minimum to a zero-size placeholder
        mem = current_tlab->Alloc(size);
        if (mem == nullptr) {
            // We could not allocate in the current TLAB,
            // so create a new one and allocate in it.
            if (CreateNewTLAB(thread)) {
                current_tlab = thread->GetTLAB();
                mem = current_tlab->Alloc(size);
            }
        }
        if (PANDA_TRACK_TLAB_ALLOCATIONS && (mem != nullptr)) {
            mem_stats_->RecordAllocateObject(GetAlignedObjectSize(size), SpaceType::SPACE_TYPE_OBJECT);
        }
    }
    if (mem == nullptr) {  // if mem == nullptr, fall back to the common allocation path
        mem = objectAllocator_->Allocate(size, align, thread);
    }
    return mem;
}

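// Non-movable objects are never relocated by the GC. On allocation failure a
// single full GC is attempted before OutOfMemoryError is thrown.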
template <bool IsFirstClassClass>
ObjectHeader *HeapManager::AllocateNonMovableObject(BaseClass *cls, size_t size, Alignment align, ManagedThread *thread)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    TriggerGCIfNeeded();
    void *mem = objectAllocator_->AllocateNonMovable(size, align, thread);
    if (UNLIKELY(mem == nullptr)) {
        GCTaskCause cause = GCTaskCause::OOM_CAUSE;
        GetGC()->WaitForGCInManaged(GCTask(cause, thread));
        mem = objectAllocator_->AllocateNonMovable(size, align, thread);
    }
    if (UNLIKELY(mem == nullptr)) {
        if (ManagedThread::GetCurrent() != nullptr) {
            ThrowOutOfMemoryError("AllocateNonMovableObject failed");
        }
        return nullptr;
    }
    LOG(DEBUG, MM_OBJECT_EVENTS) << "Alloc non-movable object at " << mem << " size: " << size;
    auto *object = InitObjectHeaderAtMem(cls, mem);
    // cls can be null only while creating the very first class, ClassRoot::Class
    // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    if constexpr (IsFirstClassClass) {
        ASSERT(cls == nullptr);
        // NOLINTNEXTLINE(readability-braces-around-statements, readability-misleading-indentation)
    } else {
        ASSERT(cls != nullptr);
        bool is_object_finalizable = IsObjectFinalized(cls);
        RegisterFinalizedObject(object, cls, is_object_finalizable);
        GetNotificationManager()->ObjectAllocEvent(cls, object, thread, size);
    }
    return object;
}

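// Turns freshly allocated, zeroed memory into a minimal valid object:
// the GC bits are initialized before the class word is published.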
ObjectHeader *HeapManager::InitObjectHeaderAtMem(BaseClass *cls, void *mem)
{
    ASSERT(mem != nullptr);
    ASSERT(GetGC()->IsMutatorAllowed());

    auto object = static_cast<ObjectHeader *>(mem);
    // We need zeroed memory here, as required by the ISA
    ASSERT(object->AtomicGetMark().GetValue() == 0);
    ASSERT(object->AtomicClassAddr<BaseClass *>() == nullptr);
    // The order is crucial here: the class word must stay zero until the GC bits are
    // initialized, to avoid a data race with the concurrent sweep; otherwise a
    // not-yet-initialized object could be reclaimed.
    GetGC()->InitGCBits(object);
    object->SetClass(cls);
    return object;
}

void HeapManager::TriggerGCIfNeeded()
{
    if (vm_->GetGCTrigger()->IsGcTriggered()) {
        GetGC()->Trigger();
    }
}

Frame *HeapManager::AllocateExtFrame(size_t size, size_t ext_sz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frame_allocator = GetCurrentStackFrameAllocator();
    return Frame::FromExt(frame_allocator->Alloc(size), ext_sz);
}

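// Retires the thread's current TLAB (accounting its occupied bytes via
// RegisterTLAB) and installs a freshly created one on success.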
bool HeapManager::CreateNewTLAB(ManagedThread *thread)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    ASSERT(thread != nullptr);
    TLAB *new_tlab = objectAllocator_.AsObjectAllocator()->CreateNewTLAB(thread);
    if (new_tlab != nullptr) {
        RegisterTLAB(thread->GetTLAB());
        thread->UpdateTLAB(new_tlab);
        return true;
    }
    return false;
}

void HeapManager::RegisterTLAB(const TLAB *tlab)
{
    ASSERT(tlab != nullptr);
    if (!PANDA_TRACK_TLAB_ALLOCATIONS && (tlab->GetOccupiedSize() != 0)) {
        mem_stats_->RecordAllocateObject(tlab->GetOccupiedSize(), SpaceType::SPACE_TYPE_OBJECT);
    }
}

void HeapManager::FreeExtFrame(Frame *frame, size_t ext_sz)
{
    ASSERT(GetGC()->IsMutatorAllowed());
    StackFrameAllocator *frame_allocator = GetCurrentStackFrameAllocator();
    frame_allocator->Free(Frame::ToExt(frame, ext_sz));
}

CodeAllocator *HeapManager::GetCodeAllocator() const
{
    return codeAllocator_;
}

InternalAllocatorPtr HeapManager::GetInternalAllocator()
{
    return internalAllocator_;
}

ObjectAllocatorPtr HeapManager::GetObjectAllocator()
{
    return objectAllocator_;
}

StackFrameAllocator *HeapManager::GetCurrentStackFrameAllocator()
{
    return ManagedThread::GetCurrent()->GetStackFrameAllocator();
}

void HeapManager::PreZygoteFork()
{
    GetGC()->WaitForGCOnPygoteFork(GCTask(GCTaskCause::PYGOTE_FORK_CAUSE));
}

float HeapManager::GetTargetHeapUtilization() const
{
    return target_utilization_;
}

void HeapManager::SetTargetHeapUtilization(float target)
{
    ASSERT_PRINT(target > 0.0F && target < 1.0F, "Target heap utilization should be in the range (0,1)");
    target_utilization_ = target;
}

size_t HeapManager::GetTotalMemory() const
{
    return vm_->GetMemStats()->GetFootprintHeap();
}

size_t HeapManager::GetFreeMemory() const
{
    return helpers::UnsignedDifference(GetTotalMemory(), vm_->GetMemStats()->GetFootprintHeap());
}

void HeapManager::DumpHeap(PandaOStringStream *o_string_stream)
{
    size_t obj_cnt = 0;
    *o_string_stream << "Dumping heap" << std::endl;
    objectAllocator_->IterateOverObjects([&obj_cnt, &o_string_stream](ObjectHeader *mem) {
        DumpObject(static_cast<ObjectHeader *>(mem), o_string_stream);
        obj_cnt++;
    });
    *o_string_stream << "Total dumped " << obj_cnt << std::endl;
}

/**
 * \brief Check whether the given object is an instance of the given class.
 * @param obj - ObjectHeader pointer
 * @param h_class - Class pointer
 * @param assignable - whether subclasses of h_class also count
 * @return true if obj is an instance of h_class, false otherwise
 */
static bool MatchesClass(const ObjectHeader *obj, const Class *h_class, bool assignable)
{
    if (assignable) {
        return obj->IsInstanceOf(h_class);
    }
    return obj->ClassAddr<Class>() == h_class;
}

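// All other threads are suspended while iterating, so the instance counts are
// taken from a consistent snapshot of the heap.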
void HeapManager::CountInstances(const PandaVector<Class *> &classes, bool assignable, uint64_t *counts)
{
    auto objects_checker = [&](ObjectHeader *obj) {
        for (size_t i = 0; i < classes.size(); ++i) {
            if (classes[i] == nullptr) {
                continue;
            }
            if (MatchesClass(obj, classes[i], assignable)) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                ++counts[i];
            }
        }
    };
    {
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        ScopedChangeThreadStatus sts(thread, ThreadStatus::RUNNING);
        ScopedSuspendAllThreadsRunning ssatr(thread->GetVM()->GetRendezvous());
        GetObjectAllocator().AsObjectAllocator()->IterateOverObjects(objects_checker);
    }
}

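// Finalization hooks: IsObjectFinalizebleFunc_ decides whether a class needs
// finalization; RegisterFinalizeReferenceFunc_ records an object for it.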
void HeapManager::SetIsFinalizableFunc(IsObjectFinalizebleFunc func)
{
    IsObjectFinalizebleFunc_ = func;
}

void HeapManager::SetRegisterFinalizeReferenceFunc(RegisterFinalizeReferenceFunc func)
{
    RegisterFinalizeReferenceFunc_ = func;
}

bool HeapManager::IsObjectFinalized(BaseClass *cls)
{
    return IsObjectFinalizebleFunc_ != nullptr && IsObjectFinalizebleFunc_(cls);
}

void HeapManager::RegisterFinalizedObject(ObjectHeader *object, BaseClass *cls, bool is_object_finalizable)
{
    if (is_object_finalizable) {
        ASSERT(RegisterFinalizeReferenceFunc_ != nullptr);
        RegisterFinalizeReferenceFunc_(object, cls);
    }
}

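// Explicit instantiations, so the AllocateNonMovableObject template can be
// defined in this .cpp file rather than in the header.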
template ObjectHeader *HeapManager::AllocateNonMovableObject<true>(BaseClass *cls, size_t size, Alignment align,
                                                                   ManagedThread *thread);

template ObjectHeader *HeapManager::AllocateNonMovableObject<false>(BaseClass *cls, size_t size, Alignment align,
                                                                    ManagedThread *thread);
}  // namespace panda::mem