/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// These includes are needed to avoid linker errors:

#include "runtime/arch/memory_helpers.h"
#include "runtime/include/mem/allocator.h"
#include "runtime/include/mem/allocator-inl.h"
#include "mem/mem_pool.h"
#include "mem/mem_config.h"
#include "mem/mem.h"
#include "runtime/include/runtime.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/object_header.h"
#include "runtime/mem/bump-allocator-inl.h"
#include "runtime/mem/freelist_allocator-inl.h"
#include "runtime/mem/internal_allocator-inl.h"
#include "runtime/mem/runslots_allocator-inl.h"
#include "runtime/mem/pygote_space_allocator-inl.h"
#include "runtime/mem/tlab.h"

namespace ark::mem {

Allocator::~Allocator() = default;

ObjectAllocatorBase::ObjectAllocatorBase(MemStatsType *memStats, GCCollectMode gcCollectMode,
                                         bool createPygoteSpaceAllocator)
    : Allocator(memStats, AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT, gcCollectMode)
{
    if (createPygoteSpaceAllocator) {
        pygoteSpaceAllocator_ = new (std::nothrow) PygoteAllocator(memStats);
        pygoteAllocEnabled_ = true;
    }
}

ObjectAllocatorBase::~ObjectAllocatorBase()
{
    // NOLINTNEXTLINE(readability-delete-null-pointer)
    if (pygoteSpaceAllocator_ != nullptr) {
        delete pygoteSpaceAllocator_;
    }
}

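// A conservative estimate: checks that the object space can still provide poolsNum pools
// of the larger of the two default pool sizes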
bool ObjectAllocatorBase::HaveEnoughPoolsInObjectSpace(size_t poolsNum) const
{
    auto memPool = PoolManager::GetMmapMemPool();
    auto poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, PANDA_DEFAULT_ALLOCATOR_POOL_SIZE);
    return memPool->HaveEnoughPoolsInObjectSpace(poolsNum, poolSize);
}

void ObjectAllocatorBase::MemoryInitialize(void *mem, size_t size) const
{
    TSAN_ANNOTATE_IGNORE_WRITES_BEGIN();
    memset_s(mem, size, 0, size);
    TSAN_ANNOTATE_IGNORE_WRITES_END();
    // Zero initialization must be visible to other threads even if the pointer to the object
    // was fetched without a 'volatile' specifier, so a full memory barrier is required here;
    // some language specs demand this.
    arch::FullMemoryBarrier();
}

void ObjectAllocatorBase::ObjectMemoryInit(void *mem, size_t size) const
{
    if (mem == nullptr) {
        return;
    }
    [[maybe_unused]] auto *object = static_cast<ObjectHeader *>(mem);
    ASSERT(object->AtomicGetMark().GetValue() == 0);
    ASSERT(object->ClassAddr<BaseClass *>() == nullptr);
    ASSERT(size >= ObjectHeader::ObjectHeaderSize());
    // Zero the memory after the object header, as required by the newobj description in the ISA
    size_t sizeToInit = size - ObjectHeader::ObjectHeaderSize();
    void *memToInit = ToVoidPtr(ToUintPtr(mem) + ObjectHeader::ObjectHeaderSize());
    MemoryInitialize(memToInit, sizeToInit);
}

template <MTModeT MT_MODE>
ObjectAllocatorNoGen<MT_MODE>::ObjectAllocatorNoGen(MemStatsType *memStats, bool createPygoteSpaceAllocator)
    : ObjectAllocatorBase(memStats, GCCollectMode::GC_ALL, createPygoteSpaceAllocator)
{
    const auto &options = Runtime::GetOptions();
    heapSpace_.Initialize(MemConfig::GetInitialHeapSizeLimit(), MemConfig::GetHeapSizeLimit(),
                          options.GetMinHeapFreePercentage(), options.GetMaxHeapFreePercentage());
    if (createPygoteSpaceAllocator) {
        ASSERT(pygoteSpaceAllocator_ != nullptr);
        pygoteSpaceAllocator_->SetHeapSpace(&heapSpace_);
    }
    objectAllocator_ = new (std::nothrow) ObjectAllocator(memStats);
    largeObjectAllocator_ = new (std::nothrow) LargeObjectAllocator(memStats);
    humongousObjectAllocator_ = new (std::nothrow) HumongousObjectAllocator(memStats);
}

template <MTModeT MT_MODE>
ObjectAllocatorNoGen<MT_MODE>::~ObjectAllocatorNoGen()
{
    delete objectAllocator_;
    delete largeObjectAllocator_;
    delete humongousObjectAllocator_;
}

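// Dispatch on the aligned size: regular-sized objects go to objectAllocator_, larger ones to
// largeObjectAllocator_, and everything beyond that to humongousObjectAllocator_; each allocation
// may grow its space by at least one default-sized pool via AllocateSafe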
template <MTModeT MT_MODE>
void *ObjectAllocatorNoGen<MT_MODE>::Allocate(size_t size, Alignment align, [[maybe_unused]] ark::ManagedThread *thread,
                                              ObjMemInitPolicy objInit, [[maybe_unused]] bool pinned)
{
    void *mem = nullptr;
    size_t alignedSize = AlignUp(size, GetAlignmentInBytes(align));
    if (alignedSize <= ObjectAllocator::GetMaxSize()) {
        size_t poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, ObjectAllocator::GetMinPoolSize());
        mem = AllocateSafe(size, align, objectAllocator_, poolSize, SpaceType::SPACE_TYPE_OBJECT, &heapSpace_);
    } else if (alignedSize <= LargeObjectAllocator::GetMaxSize()) {
        size_t poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, LargeObjectAllocator::GetMinPoolSize());
        mem = AllocateSafe(size, align, largeObjectAllocator_, poolSize, SpaceType::SPACE_TYPE_OBJECT, &heapSpace_);
    } else {
        size_t poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, HumongousObjectAllocator::GetMinPoolSize(size));
        mem = AllocateSafe(size, align, humongousObjectAllocator_, poolSize, SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT,
                           &heapSpace_);
    }
    if (objInit == ObjMemInitPolicy::REQUIRE_INIT) {
        ObjectMemoryInit(mem, size);
    }
    return mem;
}

template <MTModeT MT_MODE>
void *ObjectAllocatorNoGen<MT_MODE>::AllocateNonMovable(size_t size, Alignment align, ark::ManagedThread *thread,
                                                        ObjMemInitPolicy objInit)
{
    void *mem = nullptr;
    // Before the pygote fork, allocate small non-movable objects in the pygote space
    if (UNLIKELY(IsPygoteAllocEnabled() && pygoteSpaceAllocator_->CanAllocNonMovable(size, align))) {
        mem = pygoteSpaceAllocator_->Alloc(size, align);
    } else {
        // Without generations there is no compaction, so all allocations are non-movable
        mem = Allocate(size, align, thread, objInit, false);
    }
    if (objInit == ObjMemInitPolicy::REQUIRE_INIT) {
        ObjectMemoryInit(mem, size);
    }
    return mem;
}

template <MTModeT MT_MODE>
Alignment ObjectAllocatorNoGen<MT_MODE>::CalculateAllocatorAlignment(size_t align)
{
    ASSERT(GetPurpose() == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
    return GetAlignment(align);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::VisitAndRemoveAllPools(const MemVisitor &memVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->VisitAndRemoveAllPools(memVisitor);
    }
    objectAllocator_->VisitAndRemoveAllPools(memVisitor);
    largeObjectAllocator_->VisitAndRemoveAllPools(memVisitor);
    humongousObjectAllocator_->VisitAndRemoveAllPools(memVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->VisitAndRemoveFreePools(memVisitor);
    }
    objectAllocator_->VisitAndRemoveFreePools(memVisitor);
    largeObjectAllocator_->VisitAndRemoveFreePools(memVisitor);
    humongousObjectAllocator_->VisitAndRemoveFreePools(memVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::IterateOverObjects(const ObjectVisitor &objectVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->IterateOverObjects(objectVisitor);
    }
    objectAllocator_->IterateOverObjects(objectVisitor);
    largeObjectAllocator_->IterateOverObjects(objectVisitor);
    humongousObjectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::IterateRegularSizeObjects(const ObjectVisitor &objectVisitor)
{
    objectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::IterateNonRegularSizeObjects(const ObjectVisitor &objectVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->IterateOverObjects(objectVisitor);
    }
    largeObjectAllocator_->IterateOverObjects(objectVisitor);
    humongousObjectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::FreeObjectsMovedToPygoteSpace()
{
    // Clear it, because all objects in it have been moved to the pygote space
    objectAllocator_->VisitAndRemoveAllPools(
        [](void *mem, size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete objectAllocator_;
    objectAllocator_ = new (std::nothrow) ObjectAllocator(memStats_);
}

template <MTModeT MT_MODE>
void ObjectAllocatorNoGen<MT_MODE>::Collect(const GCObjectVisitor &gcObjectVisitor,
                                            [[maybe_unused]] GCCollectMode collectMode)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->Collect(gcObjectVisitor);
    }
    objectAllocator_->Collect(gcObjectVisitor);
    largeObjectAllocator_->Collect(gcObjectVisitor);
    humongousObjectAllocator_->Collect(gcObjectVisitor);
}

// If there were a common base class for these allocators, we could split this function and
// return a pointer to the allocator containing the object
template <MTModeT MT_MODE>
bool ObjectAllocatorNoGen<MT_MODE>::ContainObject(const ObjectHeader *obj) const
{
    if (objectAllocator_->ContainObject(obj)) {
        return true;
    }
    if (largeObjectAllocator_->ContainObject(obj)) {
        return true;
    }
    if (humongousObjectAllocator_->ContainObject(obj)) {
        return true;
    }

    return false;
}

template <MTModeT MT_MODE>
bool ObjectAllocatorNoGen<MT_MODE>::IsLive(const ObjectHeader *obj)
{
    if (pygoteSpaceAllocator_ != nullptr && pygoteSpaceAllocator_->ContainObject(obj)) {
        return pygoteSpaceAllocator_->IsLive(obj);
    }
    if (objectAllocator_->ContainObject(obj)) {
        return objectAllocator_->IsLive(obj);
    }
    if (largeObjectAllocator_->ContainObject(obj)) {
        return largeObjectAllocator_->IsLive(obj);
    }
    if (humongousObjectAllocator_->ContainObject(obj)) {
        return humongousObjectAllocator_->IsLive(obj);
    }
    return false;
}

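// Fast path: anything that fits within the young-space allocation limit is bump-pointer
// allocated in the young generation; larger objects go straight to the tenured space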
template <MTModeT MT_MODE>
void *ObjectAllocatorGen<MT_MODE>::Allocate(size_t size, Alignment align, [[maybe_unused]] ark::ManagedThread *thread,
                                            ObjMemInitPolicy objInit, [[maybe_unused]] bool pinned)
{
    void *mem = nullptr;
    size_t alignedSize = AlignUp(size, GetAlignmentInBytes(align));
    if (LIKELY(alignedSize <= GetYoungAllocMaxSize())) {
        mem = youngGenAllocator_->Alloc(size, align);
    } else {
        mem = AllocateTenured(size);
    }
    if (objInit == ObjMemInitPolicy::REQUIRE_INIT) {
        ObjectMemoryInit(mem, size);
    }
    return mem;
}

template <MTModeT MT_MODE>
void *ObjectAllocatorGen<MT_MODE>::AllocateNonMovable(size_t size, Alignment align,
                                                      [[maybe_unused]] ark::ManagedThread *thread,
                                                      ObjMemInitPolicy objInit)
{
    void *mem = nullptr;
    // Before the pygote fork, allocate small non-movable objects in the pygote space
    if (UNLIKELY(IsPygoteAllocEnabled() && pygoteSpaceAllocator_->CanAllocNonMovable(size, align))) {
        mem = pygoteSpaceAllocator_->Alloc(size, align);
    } else {
        size_t alignedSize = AlignUp(size, GetAlignmentInBytes(align));
        if (alignedSize <= ObjectAllocator::GetMaxSize()) {
            size_t poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, ObjectAllocator::GetMinPoolSize());
            mem = AllocateSafe(size, align, nonMovableObjectAllocator_, poolSize,
                               SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT, &heapSpaces_);
        } else if (alignedSize <= LargeObjectAllocator::GetMaxSize()) {
            size_t poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, LargeObjectAllocator::GetMinPoolSize());
            mem = AllocateSafe(size, align, largeNonMovableObjectAllocator_, poolSize,
                               SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT, &heapSpaces_);
        } else {
            // We don't need a special allocator for this:
            // humongous objects are non-movable anyway
            size_t poolSize = std::max(PANDA_DEFAULT_POOL_SIZE, HumongousObjectAllocator::GetMinPoolSize(size));
            mem = AllocateSafe(size, align, humongousObjectAllocator_, poolSize, SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT,
                               &heapSpaces_);
        }
    }
    if (objInit == ObjMemInitPolicy::REQUIRE_INIT) {
        ObjectMemoryInit(mem, size);
    }
    return mem;
}

template <MTModeT MT_MODE>
Alignment ObjectAllocatorGen<MT_MODE>::CalculateAllocatorAlignment(size_t align)
{
    ASSERT(GetPurpose() == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
    return GetAlignment(align);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::VisitAndRemoveAllPools(const MemVisitor &memVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->VisitAndRemoveAllPools(memVisitor);
    }
    objectAllocator_->VisitAndRemoveAllPools(memVisitor);
    largeObjectAllocator_->VisitAndRemoveAllPools(memVisitor);
    humongousObjectAllocator_->VisitAndRemoveAllPools(memVisitor);
    nonMovableObjectAllocator_->VisitAndRemoveAllPools(memVisitor);
    largeNonMovableObjectAllocator_->VisitAndRemoveAllPools(memVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->VisitAndRemoveFreePools(memVisitor);
    }
    objectAllocator_->VisitAndRemoveFreePools(memVisitor);
    largeObjectAllocator_->VisitAndRemoveFreePools(memVisitor);
    humongousObjectAllocator_->VisitAndRemoveFreePools(memVisitor);
    nonMovableObjectAllocator_->VisitAndRemoveFreePools(memVisitor);
    largeNonMovableObjectAllocator_->VisitAndRemoveFreePools(memVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::IterateOverYoungObjects(const ObjectVisitor &objectVisitor)
{
    youngGenAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::IterateOverTenuredObjects(const ObjectVisitor &objectVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->IterateOverObjects(objectVisitor);
    }
    objectAllocator_->IterateOverObjects(objectVisitor);
    largeObjectAllocator_->IterateOverObjects(objectVisitor);
    humongousObjectAllocator_->IterateOverObjects(objectVisitor);
    nonMovableObjectAllocator_->IterateOverObjects(objectVisitor);
    largeNonMovableObjectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::IterateOverObjects(const ObjectVisitor &objectVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->IterateOverObjects(objectVisitor);
    }
    youngGenAllocator_->IterateOverObjects(objectVisitor);
    objectAllocator_->IterateOverObjects(objectVisitor);
    largeObjectAllocator_->IterateOverObjects(objectVisitor);
    humongousObjectAllocator_->IterateOverObjects(objectVisitor);
    nonMovableObjectAllocator_->IterateOverObjects(objectVisitor);
    largeNonMovableObjectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::IterateRegularSizeObjects(const ObjectVisitor &objectVisitor)
{
    objectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::IterateNonRegularSizeObjects(const ObjectVisitor &objectVisitor)
{
    if (pygoteSpaceAllocator_ != nullptr) {
        pygoteSpaceAllocator_->IterateOverObjects(objectVisitor);
    }
    largeObjectAllocator_->IterateOverObjects(objectVisitor);
    humongousObjectAllocator_->IterateOverObjects(objectVisitor);
    nonMovableObjectAllocator_->IterateOverObjects(objectVisitor);
    largeNonMovableObjectAllocator_->IterateOverObjects(objectVisitor);
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::FreeObjectsMovedToPygoteSpace()
{
    // Clear it, because all objects in it have been moved to the pygote space
    objectAllocator_->VisitAndRemoveAllPools(
        [](void *mem, size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete objectAllocator_;
    objectAllocator_ = new (std::nothrow) ObjectAllocator(memStats_);
}

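// Sweeping is only performed for major collections: the young space is not swept through
// these allocators (it is reset wholesale, see ResetYoungAllocator), so GC_MINOR is a no-op here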
template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::Collect(const GCObjectVisitor &gcObjectVisitor, GCCollectMode collectMode)
{
    switch (collectMode) {
        case GCCollectMode::GC_MINOR:
            break;
        case GCCollectMode::GC_ALL:
        case GCCollectMode::GC_MAJOR:
            if (pygoteSpaceAllocator_ != nullptr) {
                pygoteSpaceAllocator_->Collect(gcObjectVisitor);
            }
            objectAllocator_->Collect(gcObjectVisitor);
            largeObjectAllocator_->Collect(gcObjectVisitor);
            humongousObjectAllocator_->Collect(gcObjectVisitor);
            nonMovableObjectAllocator_->Collect(gcObjectVisitor);
            largeNonMovableObjectAllocator_->Collect(gcObjectVisitor);
            break;
        case GCCollectMode::GC_FULL:
            UNREACHABLE();
            break;
        case GC_NONE:
            UNREACHABLE();
            break;
        default:
            UNREACHABLE();
    }
}

template <MTModeT MT_MODE>
size_t ObjectAllocatorNoGen<MT_MODE>::GetRegularObjectMaxSize()
{
    return ObjectAllocator::GetMaxSize();
}

template <MTModeT MT_MODE>
size_t ObjectAllocatorNoGen<MT_MODE>::GetLargeObjectMaxSize()
{
    return LargeObjectAllocator::GetMaxSize();
}

template <MTModeT MT_MODE>
TLAB *ObjectAllocatorNoGen<MT_MODE>::CreateNewTLAB([[maybe_unused]] size_t tlabSize)
{
    LOG(FATAL, ALLOC) << "TLAB is not supported for this allocator";
    return nullptr;
}

template <MTModeT MT_MODE>
size_t ObjectAllocatorNoGen<MT_MODE>::GetTLABMaxAllocSize()
{
    // NOTE(aemelenko): TLAB usage is not supported for non-gen GCs.
    return 0;
}

ObjectAllocatorGenBase::ObjectAllocatorGenBase(MemStatsType *memStats, GCCollectMode gcCollectMode,
                                               bool createPygoteSpaceAllocator)
    : ObjectAllocatorBase(memStats, gcCollectMode, createPygoteSpaceAllocator)
{
    const auto &options = Runtime::GetOptions();
    heapSpaces_.Initialize(options.GetInitYoungSpaceSize(), options.WasSetInitYoungSpaceSize(),
                           options.GetYoungSpaceSize(), options.WasSetYoungSpaceSize(),
                           MemConfig::GetInitialHeapSizeLimit(), MemConfig::GetHeapSizeLimit(),
                           options.GetMinHeapFreePercentage(), options.GetMaxHeapFreePercentage());
    if (createPygoteSpaceAllocator) {
        ASSERT(pygoteSpaceAllocator_ != nullptr);
        pygoteSpaceAllocator_->SetHeapSpace(&heapSpaces_);
    }
}

template <MTModeT MT_MODE>
ObjectAllocatorGen<MT_MODE>::ObjectAllocatorGen(MemStatsType *memStats, bool createPygoteSpaceAllocator)
    : ObjectAllocatorGenBase(memStats, GCCollectMode::GC_ALL, createPygoteSpaceAllocator)
{
    // For the generational GC we use a standalone pool for the young space, and we use that pool in full
    heapSpaces_.UseFullYoungSpace();
    size_t youngSpaceSize = heapSpaces_.GetCurrentYoungSize();
    size_t initTlabSize = Runtime::GetOptions().GetInitTlabSize();
    auto youngSharedSpaceSize = Runtime::GetOptions().GetYoungSharedSpaceSize();
    ASSERT(youngSpaceSize >= youngSharedSpaceSize);
    ASSERT(initTlabSize != 0);
    size_t maxTlabsCountInYoungGen;
    if constexpr (MT_MODE == MT_MODE_SINGLE) {
        // For single-threaded VMs, allocate the whole private young space as a single TLAB
        maxTlabsCountInYoungGen = 1;
    } else {
        maxTlabsCountInYoungGen = (youngSpaceSize - youngSharedSpaceSize) / initTlabSize;
        ASSERT(((youngSpaceSize - youngSharedSpaceSize) % initTlabSize) == 0);
    }
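    // For example (hypothetical numbers): a 4 MB private young space with a 256 KB initial
    // TLAB size yields maxTlabsCountInYoungGen == 16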
    ASSERT(maxTlabsCountInYoungGen * initTlabSize <= youngSpaceSize);

    // NOTE(aemelenko): The allocator pointer is missing here because we construct the
    // BumpPointer allocator only after calling the AllocArena method
    auto youngPool = heapSpaces_.AllocAlonePoolForYoung(SpaceType::SPACE_TYPE_OBJECT,
                                                        YoungGenAllocator::GetAllocatorType(), &youngGenAllocator_);
    youngGenAllocator_ = new (std::nothrow)
        YoungGenAllocator(std::move(youngPool), SpaceType::SPACE_TYPE_OBJECT, memStats, maxTlabsCountInYoungGen);
    objectAllocator_ = new (std::nothrow) ObjectAllocator(memStats);
    largeObjectAllocator_ = new (std::nothrow) LargeObjectAllocator(memStats);
    humongousObjectAllocator_ = new (std::nothrow) HumongousObjectAllocator(memStats);
    nonMovableObjectAllocator_ = new (std::nothrow) ObjectAllocator(memStats, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    largeNonMovableObjectAllocator_ =
        new (std::nothrow) LargeObjectAllocator(memStats, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    memStats_ = memStats;
    GetYoungRanges().push_back({0, 0});
}

template <MTModeT MT_MODE>
ObjectAllocatorGen<MT_MODE>::~ObjectAllocatorGen()
{
    // We need to free the pool space when the allocator is destroyed
    youngGenAllocator_->VisitAndRemoveAllPools(
        [](void *mem, size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
    delete youngGenAllocator_;
    delete objectAllocator_;
    delete largeObjectAllocator_;
    delete humongousObjectAllocator_;
    delete nonMovableObjectAllocator_;
    delete largeNonMovableObjectAllocator_;
}

template <MTModeT MT_MODE>
size_t ObjectAllocatorGen<MT_MODE>::GetRegularObjectMaxSize()
{
    return ObjectAllocator::GetMaxSize();
}

template <MTModeT MT_MODE>
size_t ObjectAllocatorGen<MT_MODE>::GetLargeObjectMaxSize()
{
    return LargeObjectAllocator::GetMaxSize();
}

template <MTModeT MT_MODE>
bool ObjectAllocatorGen<MT_MODE>::IsObjectInYoungSpace(const ObjectHeader *obj)
{
    if (youngGenAllocator_ == nullptr) {
        return false;
    }
    return youngGenAllocator_->GetMemRange().IsAddressInRange(ToUintPtr(obj));
}

template <MTModeT MT_MODE>
bool ObjectAllocatorGen<MT_MODE>::IsIntersectedWithYoung(const MemRange &memRange)
{
    return youngGenAllocator_->GetMemRange().IsIntersect(memRange);
}

template <MTModeT MT_MODE>
bool ObjectAllocatorGen<MT_MODE>::IsObjectInNonMovableSpace(const ObjectHeader *obj)
{
    return nonMovableObjectAllocator_->ContainObject(obj);
}

template <MTModeT MT_MODE>
bool ObjectAllocatorGen<MT_MODE>::HasYoungSpace()
{
    return youngGenAllocator_ != nullptr;
}

template <MTModeT MT_MODE>
const std::vector<MemRange> &ObjectAllocatorGen<MT_MODE>::GetYoungSpaceMemRanges()
{
    return GetYoungRanges();
}

template <MTModeT MT_MODE>
std::vector<MarkBitmap *> &ObjectAllocatorGen<MT_MODE>::GetYoungSpaceBitmaps()
{
    static std::vector<MarkBitmap *> ret;
    LOG(FATAL, ALLOC) << "GetYoungSpaceBitmaps not applicable for ObjectAllocatorGen";
    return ret;
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::ResetYoungAllocator()
{
    MemStatsType *memStats = memStats_;
    auto threadCallback = [&memStats](ManagedThread *thread) {
        if (!PANDA_TRACK_TLAB_ALLOCATIONS && (thread->GetTLAB()->GetOccupiedSize() != 0)) {
            memStats->RecordAllocateObject(thread->GetTLAB()->GetOccupiedSize(), SpaceType::SPACE_TYPE_OBJECT);
        }
        if (Runtime::GetOptions().IsAdaptiveTlabSize()) {
            thread->GetWeightedTlabAverage()->ComputeNewSumAndResetSamples();
        }
        // We should not collect the current TLAB fill statistics for the adaptive size here,
        // since the TLAB may not be completely filled before the reset
        thread->ClearTLAB();
        return true;
    };
    Thread::GetCurrent()->GetVM()->GetThreadManager()->EnumerateThreads(threadCallback);
    youngGenAllocator_->Reset();
}

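// Create a TLAB in the young space and eagerly zero its memory; the region is temporarily
// unpoisoned so that the zeroing itself does not trigger ASan reports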
template <MTModeT MT_MODE>
TLAB *ObjectAllocatorGen<MT_MODE>::CreateNewTLAB(size_t tlabSize)
{
    TLAB *newTlab = youngGenAllocator_->CreateNewTLAB(tlabSize);
    if (newTlab != nullptr) {
        ASAN_UNPOISON_MEMORY_REGION(newTlab->GetStartAddr(), newTlab->GetSize());
        MemoryInitialize(newTlab->GetStartAddr(), newTlab->GetSize());
        ASAN_POISON_MEMORY_REGION(newTlab->GetStartAddr(), newTlab->GetSize());
    }
    return newTlab;
}

template <MTModeT MT_MODE>
size_t ObjectAllocatorGen<MT_MODE>::GetTLABMaxAllocSize()
{
    if (Runtime::GetOptions().IsAdaptiveTlabSize()) {
        return Runtime::GetOptions().GetMaxTlabSize();
    }
    return Runtime::GetOptions().GetInitTlabSize();
}

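// Upper bound for allocations served from the young space: Allocate() falls back to
// AllocateTenured() for anything larger than this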
/* static */
template <MTModeT MT_MODE>
size_t ObjectAllocatorGen<MT_MODE>::GetYoungAllocMaxSize()
{
    if (Runtime::GetOptions().IsAdaptiveTlabSize()) {
        return Runtime::GetOptions().GetMaxTlabSize();
    }
    return Runtime::GetOptions().GetInitTlabSize();
}

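// The owning allocator is resolved through the pool's allocator header. Note that the young
// pool was registered with &youngGenAllocator_ (the address of the field) because the allocator
// object itself did not exist yet when the pool was allocated, so the comparison below is
// against &youngGenAllocator_ rather than against the allocator pointer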
template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::IterateOverObjectsInRange(MemRange memRange, const ObjectVisitor &objectVisitor)
{
    // We rely on the invariant that the memory range related to a card is located in one allocator
    auto spaceType = PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(ToVoidPtr(memRange.GetStartAddress()));
    auto allocInfo = PoolManager::GetMmapMemPool()->GetAllocatorInfoForAddr(ToVoidPtr(memRange.GetStartAddress()));
    auto *allocator = allocInfo.GetAllocatorHeaderAddr();
    switch (spaceType) {
        case SpaceType::SPACE_TYPE_OBJECT:
            if (allocator == objectAllocator_) {
                objectAllocator_->IterateOverObjectsInRange(objectVisitor, ToVoidPtr(memRange.GetStartAddress()),
                                                            ToVoidPtr(memRange.GetEndAddress()));
            } else if (allocator == pygoteSpaceAllocator_) {
                pygoteSpaceAllocator_->IterateOverObjectsInRange(objectVisitor, ToVoidPtr(memRange.GetStartAddress()),
                                                                 ToVoidPtr(memRange.GetEndAddress()));
            } else if (allocator == &youngGenAllocator_) {
                youngGenAllocator_->IterateOverObjectsInRange(objectVisitor, ToVoidPtr(memRange.GetStartAddress()),
                                                              ToVoidPtr(memRange.GetEndAddress()));
            } else if (allocator == largeObjectAllocator_) {
                largeObjectAllocator_->IterateOverObjectsInRange(objectVisitor, ToVoidPtr(memRange.GetStartAddress()),
                                                                 ToVoidPtr(memRange.GetEndAddress()));
            } else {
                // If we reach this line, we may have an issue with multi-VM CardTable iteration
                UNREACHABLE();
            }
            break;
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
            if (allocator == humongousObjectAllocator_) {
                humongousObjectAllocator_->IterateOverObjectsInRange(
                    objectVisitor, ToVoidPtr(memRange.GetStartAddress()), ToVoidPtr(memRange.GetEndAddress()));
            } else {
                // If we reach this line, we may have an issue with multi-VM CardTable iteration
                UNREACHABLE();
            }
            break;
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
            if (allocator == nonMovableObjectAllocator_) {
                nonMovableObjectAllocator_->IterateOverObjectsInRange(
                    objectVisitor, ToVoidPtr(memRange.GetStartAddress()), ToVoidPtr(memRange.GetEndAddress()));
            } else if (allocator == largeNonMovableObjectAllocator_) {
                largeNonMovableObjectAllocator_->IterateOverObjectsInRange(
                    objectVisitor, ToVoidPtr(memRange.GetStartAddress()), ToVoidPtr(memRange.GetEndAddress()));
            } else {
                // If we reach this line, we may have an issue with multi-VM CardTable iteration
                UNREACHABLE();
            }
            break;
        default:
            // If we reach this line, we may have an issue with multi-VM CardTable iteration
            UNREACHABLE();
            break;
    }
}

template <MTModeT MT_MODE>
bool ObjectAllocatorGen<MT_MODE>::ContainObject(const ObjectHeader *obj) const
{
    if (pygoteSpaceAllocator_ != nullptr && pygoteSpaceAllocator_->ContainObject(obj)) {
        return true;
    }
    if (youngGenAllocator_->ContainObject(obj)) {
        return true;
    }
    if (objectAllocator_->ContainObject(obj)) {
        return true;
    }
    if (largeObjectAllocator_->ContainObject(obj)) {
        return true;
    }
    if (humongousObjectAllocator_->ContainObject(obj)) {
        return true;
    }
    if (nonMovableObjectAllocator_->ContainObject(obj)) {
        return true;
    }
    if (largeNonMovableObjectAllocator_->ContainObject(obj)) {
        return true;
    }

    return false;
}

template <MTModeT MT_MODE>
bool ObjectAllocatorGen<MT_MODE>::IsLive(const ObjectHeader *obj)
{
    if (pygoteSpaceAllocator_ != nullptr && pygoteSpaceAllocator_->ContainObject(obj)) {
        return pygoteSpaceAllocator_->IsLive(obj);
    }
    if (youngGenAllocator_->ContainObject(obj)) {
        return youngGenAllocator_->IsLive(obj);
    }
    if (objectAllocator_->ContainObject(obj)) {
        return objectAllocator_->IsLive(obj);
    }
    if (largeObjectAllocator_->ContainObject(obj)) {
        return largeObjectAllocator_->IsLive(obj);
    }
    if (humongousObjectAllocator_->ContainObject(obj)) {
        return humongousObjectAllocator_->IsLive(obj);
    }
    if (nonMovableObjectAllocator_->ContainObject(obj)) {
        return nonMovableObjectAllocator_->IsLive(obj);
    }
    if (largeNonMovableObjectAllocator_->ContainObject(obj)) {
        return largeNonMovableObjectAllocator_->IsLive(obj);
    }

    return false;
}

template <MTModeT MT_MODE>
void ObjectAllocatorGen<MT_MODE>::UpdateSpaceData()
{
    GetYoungRanges().push_back(youngGenAllocator_->GetMemRange());
}

void ObjectAllocatorGenBase::InvalidateSpaceData()
{
    ranges_.clear();
    youngBitmaps_.clear();
}

template class ObjectAllocatorGen<MT_MODE_SINGLE>;
template class ObjectAllocatorGen<MT_MODE_MULTI>;
template class ObjectAllocatorGen<MT_MODE_TASK>;
template class ObjectAllocatorNoGen<MT_MODE_SINGLE>;
template class ObjectAllocatorNoGen<MT_MODE_MULTI>;
template class ObjectAllocatorNoGen<MT_MODE_TASK>;

} // namespace ark::mem