/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common_components/heap/allocator/region_space.h"

#include "common_components/heap/collector/collector.h"
#include "common_components/heap/collector/collector_resources.h"
#include "common_components/platform/os.h"
#if defined(COMMON_SANITIZER_SUPPORT)
#include "common_components/sanitizer/sanitizer_interface.h"
#endif
#include "common_components/common/scoped_object_access.h"
#include "common_components/heap/heap.h"

namespace common {
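// Takes a fresh region from the region manager and registers it as the calling
// thread's thread-local allocation region for the given buffer type (YOUNG, OLD,
// or TO). Depending on the mutator's current GC phase, the region's marking line
// (and, during copy/fix phases, its copy line) is stamped via SetMarkingLine/
// SetCopyLine. Returns nullptr when no region is available.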
template <AllocBufferType type>
RegionDesc* RegionSpace::AllocateThreadLocalRegion(bool expectPhysicalMem)
{
    if constexpr (type == AllocBufferType::TO) {
        RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, false, true);
        if (region != nullptr) {
            toSpace_.AddThreadLocalRegion(region);
        }
        return region;
    }

    ASSERT_LOGF(!IsGcThread(), "GC thread cannot take tlRegion/tlOldRegion");
    RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, true);
    if (region != nullptr) {
        GCPhase phase = Mutator::GetMutator()->GetMutatorPhase();
        if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB ||
            phase == GC_PHASE_POST_MARK) {
            region->SetMarkingLine();
        } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) {
            region->SetMarkingLine();
            region->SetCopyLine();
        }

        if constexpr (type == AllocBufferType::YOUNG) {
            youngSpace_.AddThreadLocalRegion(region);
        } else if constexpr (type == AllocBufferType::OLD) {
            oldSpace_.AddThreadLocalRegion(region);
        }
    }

    return region;
}

// Dumps a brief summary of all regions.
void RegionSpace::DumpAllRegionSummary(const char* msg) const
{
    auto from = fromSpace_.GetAllocatedSize();
    auto exempt = fromSpace_.GetSurvivedSize();
    auto to = toSpace_.GetAllocatedSize();
    auto young = youngSpace_.GetAllocatedSize();
    auto old = oldSpace_.GetAllocatedSize();
    auto other = regionManager_.GetAllocatedSize();

    std::ostringstream oss;
    oss << msg << "Current allocated: " << Pretty(from + to + young + old + other) << ". (from: " << Pretty(from)
        << "(exempt: " << Pretty(exempt) << "), to: " << Pretty(to) << ", young: " << Pretty(young)
        << ", old: " << Pretty(old) << ", other: " << Pretty(other) << ")";
    VLOG(DEBUG, oss.str().c_str());
}

// Dumps detailed statistics for all regions.
void RegionSpace::DumpAllRegionStats(const char* msg) const
{
    VLOG(DEBUG, msg);
    youngSpace_.DumpRegionStats();
    oldSpace_.DumpRegionStats();
    fromSpace_.DumpRegionStats();
    toSpace_.DumpRegionStats();
    regionManager_.DumpRegionStats();

    size_t usedUnits = GetUsedUnitCount();
    VLOG(DEBUG, "\tused units: %zu (%zu B)", usedUnits, usedUnits * RegionDesc::UNIT_SIZE);
}
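
// Dispatches a single allocation attempt: read-only objects, large objects, and
// pinned objects go straight to the region manager; everything else goes through
// the thread-local allocation buffer. Returns 0 on failure.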
HeapAddress RegionSpace::TryAllocateOnce(size_t allocSize, AllocType allocType)
{
    if (UNLIKELY_CC(allocType == AllocType::READ_ONLY_OBJECT)) {
        return regionManager_.AllocReadOnly(allocSize);
    }
    if (UNLIKELY_CC(allocSize >= RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD)) {
        return regionManager_.AllocLarge(allocSize);
    }
    if (UNLIKELY_CC(allocType == AllocType::PINNED_OBJECT)) {
        return regionManager_.AllocPinned(allocSize);
    }
    AllocationBuffer* allocBuffer = AllocationBuffer::GetOrCreateAllocBuffer();
    return allocBuffer->Allocate(allocSize, allocType);
}
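
// Decides whether a failed allocation should be retried. Early attempts simply
// reschedule; attempts below TRIGGER_OOM trigger or wait for a heuristic GC;
// at TRIGGER_OOM an OOM-reason GC is requested; beyond that, OOM is thrown.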
bool RegionSpace::ShouldRetryAllocation(size_t& tryTimes) const
{
    {
        // enter and leave a saferegion so that a pending stop-the-world request can be served.
        ScopedEnterSaferegion enterSaferegion(true);
    }

    if (!IsRuntimeThread() && tryTimes <= static_cast<size_t>(TryAllocationThreshold::RESCHEDULE)) {
        return true;
    } else if (tryTimes < static_cast<size_t>(TryAllocationThreshold::TRIGGER_OOM)) {
        if (Heap::GetHeap().IsGcStarted()) {
            ScopedEnterSaferegion enterSaferegion(false);
            Heap::GetHeap().GetCollectorResources().WaitForGCFinish();
        } else {
            Heap::GetHeap().GetCollector().RequestGC(GC_REASON_HEU, false, GC_TYPE_FULL);
        }
        return true;
    } else if (tryTimes == static_cast<size_t>(TryAllocationThreshold::TRIGGER_OOM)) {
        if (!Heap::GetHeap().IsGcStarted()) {
            VLOG(INFO, "gc is triggered for OOM");
            Heap::GetHeap().GetCollector().RequestGC(GC_REASON_OOM, false, GC_TYPE_FULL);
        } else {
            ScopedEnterSaferegion enterSaferegion(false);
            Heap::GetHeap().GetCollectorResources().WaitForGCFinish();
            tryTimes--;
        }
        return true;
    } else { //LCOV_EXCL_BR_LINE
        Heap::throwOOM();
        return false;
    }
}
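
// Takes a whole region for the old space, stamps the marking/copy lines for the
// current GC phase, allocates the entire usable range in one step, and returns
// the region's start address.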
uintptr_t RegionSpace::AllocOldRegion()
{
    RegionDesc* region = regionManager_.TakeRegion(false, false);
    ASSERT(region != nullptr);

    GCPhase phase = Mutator::GetMutator()->GetMutatorPhase();
    if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB ||
        phase == GC_PHASE_POST_MARK) {
        region->SetMarkingLine();
    } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) {
        region->SetMarkingLine();
        region->SetCopyLine();
    }

    DLOG(REGION, "alloc small object region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u",
        region, region->GetRegionStart(), region->GetRegionSize(),
        region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(),
        region->GetRegionType());
    oldSpace_.AddFullRegion(region);

    uintptr_t start = region->GetRegionStart();
    uintptr_t addr = region->Alloc(region->GetRegionEnd() - region->GetRegionAllocPtr());
    ASSERT(addr != 0);

    return start;
}
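
// Like AllocOldRegion, but the region is registered as a recent pinned region;
// objects in pinned regions are not moved by the GC.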
uintptr_t RegionSpace::AllocPinnedRegion()
{
    RegionDesc* region = regionManager_.TakeRegion(false, false);
    ASSERT(region != nullptr);

    GCPhase phase = Mutator::GetMutator()->GetMutatorPhase();
    if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB ||
        phase == GC_PHASE_POST_MARK) {
        region->SetMarkingLine();
    } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) {
        region->SetMarkingLine();
        region->SetCopyLine();
    }

    DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(),
        region->GetRegionAllocatedSize(),
        region->GetRegionType());
    regionManager_.AddRecentPinnedRegion(region);

    uintptr_t start = region->GetRegionStart();
    uintptr_t addr = region->Alloc(region->GetRegionEnd() - region->GetRegionAllocPtr());
    ASSERT(addr != 0);

    return start;
}

uintptr_t RegionSpace::AllocLargeRegion(size_t size)
{
    return regionManager_.AllocLarge(size, false);
}
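
// Allocates a large region for JIT-compiled code, names the mapping "ArkTS Code"
// for diagnostics, and marks the memory as awaiting machine-code installation.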
uintptr_t RegionSpace::AllocJitFortRegion(size_t size)
{
    uintptr_t addr = regionManager_.AllocLarge(size, false);
    os::PrctlSetVMA(reinterpret_cast<void *>(addr), size, "ArkTS Code");
    regionManager_.MarkJitFortMemAwaitingInstall(reinterpret_cast<BaseObject*>(addr));
    return addr;
}
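
// Allocation entry point: retries TryAllocateOnce under the policy implemented by
// ShouldRetryAllocation, yielding the CPU between attempts. Returns the address
// past the object header, or 0 on failure.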
HeapAddress RegionSpace::Allocate(size_t size, AllocType allocType)
{
    size_t tryTimes = 0;
    uintptr_t internalAddr = 0;
    size_t allocSize = ToAllocatedSize(size);
    do {
        tryTimes++;
        internalAddr = TryAllocateOnce(allocSize, allocType);
        if (LIKELY_CC(internalAddr != 0)) {
            break;
        }
        if (IsGcThread()) {
            return 0; // the GC does not have enough space to move this object.
        }
        if (!ShouldRetryAllocation(tryTimes)) {
            break;
        }
        (void)sched_yield();
    } while (true);
    if (internalAddr == 0) {
        return 0;
    }
#if defined(COMMON_TSAN_SUPPORT)
    Sanitizer::TsanAllocObject(reinterpret_cast<void *>(internalAddr), allocSize);
#endif
    return internalAddr + HEADER_SIZE;
}

// Only used for serialization, where allocType and the target memory must stay consistent.
HeapAddress RegionSpace::AllocateNoGC(size_t size, AllocType allocType)
{
    bool allowGC = false;
    uintptr_t internalAddr = 0;
    size_t allocSize = ToAllocatedSize(size);
    if (UNLIKELY_CC(allocType == AllocType::PINNED_OBJECT)) {
        internalAddr = regionManager_.AllocPinned(allocSize, allowGC);
    } else if (LIKELY_CC(allocType == AllocType::MOVEABLE_OBJECT || allocType == AllocType::MOVEABLE_OLD_OBJECT)) {
        AllocationBuffer* allocBuffer = AllocationBuffer::GetOrCreateAllocBuffer();
        internalAddr = allocBuffer->Allocate(allocSize, allocType);
    } else { //LCOV_EXCL_BR_LINE
        // unreachable for serialization
        UNREACHABLE_CC();
    }
    if (internalAddr == 0) {
        return 0;
    }
#if defined(COMMON_TSAN_SUPPORT)
    Sanitizer::TsanAllocObject(reinterpret_cast<void *>(internalAddr), allocSize);
#endif
    return internalAddr + HEADER_SIZE;
}
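
// Forwards all live objects out of a from-space region. If any object cannot be
// forwarded, the partially-forwarded region is moved from from-space to to-space
// so that its already-copied contents stay valid.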
void RegionSpace::CopyRegion(RegionDesc* region)
{
    LOGF_CHECK(region->IsFromRegion()) << "region type " << static_cast<uint8_t>(region->GetRegionType());
    DLOG(COPY, "try forward region %p @0x%zx+%zu type %u, live bytes %u",
        region, region->GetRegionStart(), region->GetRegionAllocatedSize(),
        region->GetRegionType(), region->GetLiveByteCount());

    if (region->GetLiveByteCount() == 0) {
        return;
    }

    int32_t rawPointerCount = region->GetRawPointerObjectCount();
    CHECK(rawPointerCount == 0);
    Collector& collector = Heap::GetHeap().GetCollector();
    bool forwarded = region->VisitLiveObjectsUntilFalse(
        [&collector](BaseObject* obj) { return collector.ForwardObject(obj); });
    if (!forwarded) {
        DLOG(COPY, "failure to forward region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u, %u live bytes",
            region, region->GetRegionStart(), region->GetRegionAllocatedSize(),
            region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(),
            region->GetRegionType(), region->GetLiveByteCount());

        fromSpace_.DeleteFromRegion(region);
        // since this region is possibly partially forwarded, treat it as a to-space region.
        toSpace_.AddFullRegion(region);
    }
}
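
// Initializes the region-based heap: reserves and names the virtual address range,
// then wires up the region manager and the reserved heap boundaries.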
void RegionSpace::Init(const RuntimeParam& param)
{
    MemoryMap::Option opt = MemoryMap::DEFAULT_OPTIONS;
    opt.tag = "region_heap";
    size_t heapSize = param.heapParam.heapSize * KB;
    maxGarbageCacheSize_ = param.gcParam.maxGarbageCacheSize;

#ifndef PANDA_TARGET_32
    static constexpr uint64_t MAX_SUPPORT_CAPACITY = 4ULL * GB;
    // 2: double heap size
    LOGF_CHECK((heapSize / 2) <= MAX_SUPPORT_CAPACITY) << "Max support capacity 4G";
#endif

    size_t totalSize = RegionManager::GetHeapMemorySize(heapSize);
    size_t regionNum = RegionManager::GetHeapUnitCount(heapSize);
#if defined(COMMON_ASAN_SUPPORT)
    // ASan's memory aliasing technique needs a shareable page
    opt.flags &= ~MAP_PRIVATE;
    opt.flags |= MAP_SHARED;
    DLOG(SANITIZER, "mmap flags set to 0x%x", opt.flags);
#endif
    // this must succeed; otherwise it does not return
    map_ = MemoryMap::MapMemoryAlignInner4G(totalSize, totalSize, opt);

    size_t metadataSize = RegionManager::GetMetadataSize(regionNum);
    uintptr_t baseAddr = reinterpret_cast<uintptr_t>(map_->GetBaseAddr());
    os::PrctlSetVMA(reinterpret_cast<void*>(baseAddr), metadataSize, "ArkTS Heap CMCGC Metadata");
    os::PrctlSetVMA(reinterpret_cast<void*>(baseAddr + metadataSize), totalSize - metadataSize,
                    "ArkTS Heap CMCGC RegionHeap");

#if defined(COMMON_SANITIZER_SUPPORT)
    Sanitizer::OnHeapAllocated(map_->GetBaseAddr(), map_->GetMappedSize());
#endif

    HeapAddress metadata = reinterpret_cast<HeapAddress>(map_->GetBaseAddr());
    fromSpace_.SetExemptedRegionThreshold(param.heapParam.exemptionThreshold);
    regionManager_.Initialize(regionNum, metadata);
    reservedStart_ = regionManager_.GetRegionHeapStart();
    reservedEnd_ = reinterpret_cast<HeapAddress>(map_->GetMappedEndAddr());
#if defined(COMMON_DUMP_ADDRESS)
    VLOG(DEBUG, "region metadata@%zx, heap @[0x%zx+%zu, 0x%zx)", metadata, reservedStart_,
         reservedEnd_ - reservedStart_, reservedEnd_);
#endif
    Heap::OnHeapCreated(reservedStart_);
    Heap::OnHeapExtended(reservedEnd_);
}
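
// Returns the calling thread's allocation buffer, lazily creating, initializing,
// and registering one on first use.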
AllocationBuffer* AllocationBuffer::GetOrCreateAllocBuffer()
{
    auto* buffer = AllocationBuffer::GetAllocBuffer();
    if (buffer == nullptr) {
        buffer = new (std::nothrow) AllocationBuffer();
        LOGF_CHECK(buffer != nullptr) << "new region alloc buffer fail";
        buffer->Init();
        ThreadLocal::SetAllocBuffer(buffer);
    }
    return buffer;
}
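
// Returns all of this buffer's thread-local regions (young, old, and to-space)
// to the heap and resets them to the null region.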
void AllocationBuffer::ClearThreadLocalRegion()
{
    if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) {
        RegionSpace& heap = reinterpret_cast<RegionSpace&>(Heap::GetHeap().GetAllocator());
        heap.HandleFullThreadLocalRegion<AllocBufferType::YOUNG>(tlRegion_);
        tlRegion_ = RegionDesc::NullRegion();
    }
    if (LIKELY_CC(tlOldRegion_ != RegionDesc::NullRegion())) {
        RegionSpace& heap = reinterpret_cast<RegionSpace&>(Heap::GetHeap().GetAllocator());
        heap.HandleFullThreadLocalRegion<AllocBufferType::OLD>(tlOldRegion_);
        tlOldRegion_ = RegionDesc::NullRegion();
    }
    if (LIKELY_CC(tlToRegion_ != RegionDesc::NullRegion())) {
        RegionSpace& heap = reinterpret_cast<RegionSpace&>(Heap::GetHeap().GetAllocator());
        heap.HandleFullThreadLocalRegion<AllocBufferType::TO>(tlToRegion_);
        tlToRegion_ = RegionDesc::NullRegion();
    }
}

void AllocationBuffer::Unregister()
{
    Heap::GetHeap().UnregisterAllocBuffer(*this);
}

AllocationBuffer* AllocationBuffer::GetAllocBuffer() { return ThreadLocal::GetAllocBuffer(); }

AllocationBuffer::~AllocationBuffer()
{
    ClearThreadLocalRegion();
}
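
// Initializes the thread-local regions and registers the buffer with the heap.
// tlRegion_ must stay at offset 0: per the static_assert below, the same offset
// is hard-coded in llvm-project and must be updated in lockstep.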
void AllocationBuffer::Init()
{
    static_assert(offsetof(AllocationBuffer, tlRegion_) == 0,
                  "need to modify the offset of this value in llvm-project at the same time");
    tlRegion_ = RegionDesc::NullRegion();
    tlOldRegion_ = RegionDesc::NullRegion();
    Heap::GetHeap().RegisterAllocBuffer(*this);
}
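
// Allocates from the thread-local to-space region (used while the GC copies
// objects), retiring the exhausted region and taking a fresh one as needed.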
HeapAddress AllocationBuffer::ToSpaceAllocate(size_t totalSize)
{
    HeapAddress addr = 0;
    if (LIKELY_CC(tlToRegion_ != RegionDesc::NullRegion())) {
        addr = tlToRegion_->Alloc(totalSize);
    }

    if (UNLIKELY_CC(addr == 0)) {
        RegionSpace& heapSpace = reinterpret_cast<RegionSpace&>(Heap::GetHeap().GetAllocator());

        heapSpace.HandleFullThreadLocalRegion<AllocBufferType::TO>(tlToRegion_);
        tlToRegion_ = RegionDesc::NullRegion();

        RegionDesc* r = heapSpace.AllocateThreadLocalRegion<AllocBufferType::TO>(false);
        if (UNLIKELY_CC(r == nullptr)) {
            return 0;
        }

        tlToRegion_ = r;
        addr = tlToRegion_->Alloc(totalSize);
    }

    DLOG(ALLOC, "alloc to 0x%zx(%zu)", addr, totalSize);
    return addr;
}
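
// Mutator allocation fast path: raw-pointer objects take a dedicated path, and
// movable young/old objects are served from the matching thread-local region,
// falling back to AllocateImpl when that region is exhausted.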
HeapAddress AllocationBuffer::Allocate(size_t totalSize, AllocType allocType)
{
    // a hoisted fast path for specific allocation types, kept small so it can be inlined
    HeapAddress addr = 0;
    if (UNLIKELY_CC(allocType == AllocType::RAW_POINTER_OBJECT)) {
        return AllocateRawPointerObject(totalSize);
    }

    ASSERT_LOGF(allocType == AllocType::MOVEABLE_OBJECT || allocType == AllocType::MOVEABLE_OLD_OBJECT,
                "unexpected alloc type");

    if (allocType == AllocType::MOVEABLE_OBJECT) {
        if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) {
            addr = tlRegion_->Alloc(totalSize);
        }
    } else if (allocType == AllocType::MOVEABLE_OLD_OBJECT) {
        if (LIKELY_CC(tlOldRegion_ != RegionDesc::NullRegion())) {
            addr = tlOldRegion_->Alloc(totalSize);
        }
    }

    if (UNLIKELY_CC(addr == 0)) {
        addr = AllocateImpl(totalSize, allocType);
    }

    DLOG(ALLOC, "alloc 0x%zx(%zu)", addr, totalSize);
    return addr;
}

// try a single allocation; failure handling is left to the caller
HeapAddress AllocationBuffer::AllocateImpl(size_t totalSize, AllocType allocType)
{
    RegionSpace& heapSpace = reinterpret_cast<RegionSpace&>(Heap::GetHeap().GetAllocator());

    // retire the full thread-local region, then allocate a new one and retry
    if (allocType == AllocType::MOVEABLE_OBJECT) {
        heapSpace.HandleFullThreadLocalRegion<AllocBufferType::YOUNG>(tlRegion_);
        tlRegion_ = RegionDesc::NullRegion();

        RegionDesc* r = heapSpace.AllocateThreadLocalRegion<AllocBufferType::YOUNG>(false);
        if (UNLIKELY_CC(r == nullptr)) {
            return 0;
        }

        tlRegion_ = r;
        return tlRegion_->Alloc(totalSize);
    } else if (allocType == AllocType::MOVEABLE_OLD_OBJECT) {
        heapSpace.HandleFullThreadLocalRegion<AllocBufferType::OLD>(tlOldRegion_);
        tlOldRegion_ = RegionDesc::NullRegion();

        RegionDesc* r = heapSpace.AllocateThreadLocalRegion<AllocBufferType::OLD>(false);
        if (UNLIKELY_CC(r == nullptr)) {
            return 0;
        }

        tlOldRegion_ = r;
        return tlOldRegion_->Alloc(totalSize);
    }
    UNREACHABLE();
}
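
// Allocates from the thread-local raw-pointer region list, taking a new multi-unit
// region from the region manager when the head region cannot satisfy the request.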
HeapAddress AllocationBuffer::AllocateRawPointerObject(size_t totalSize)
{
    RegionDesc* region = tlRawPointerRegions_.GetHeadRegion();
    if (region != nullptr) {
        HeapAddress allocAddr = region->Alloc(totalSize);
        if (allocAddr != 0) {
            return allocAddr;
        }
    }
    RegionManager& manager = reinterpret_cast<RegionSpace&>(Heap::GetHeap().GetAllocator()).GetRegionManager();
    size_t needRegionNum = totalSize / RegionDesc::UNIT_SIZE + 1;
    // a region must have at least 2 units
    needRegionNum = (needRegionNum == 1) ? 2 : needRegionNum;
    region = manager.TakeRegion(needRegionNum, RegionDesc::UnitRole::SMALL_SIZED_UNITS);
    if (region == nullptr) {
        return 0;
    }
    // the new region is large enough for totalSize.
    HeapAddress allocAddr = region->Alloc(totalSize);
    ASSERT_LOGF(allocAddr != 0, "allocation failure");
    tlRawPointerRegions_.PrependRegion(region, RegionDesc::RegionType::TL_RAW_POINTER_REGION);
    return allocAddr;
}

#ifndef NDEBUG
bool RegionSpace::IsHeapObject(HeapAddress addr) const
{
    return IsHeapAddress(addr);
}
#endif
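
// Hands a prepared thread-local region to each allocation buffer that reported
// itself hungry; any region left over at the end is returned to the region manager.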
void RegionSpace::FeedHungryBuffers()
{
    ScopedObjectAccess soa;
    AllocBufferManager::HungryBuffers hungryBuffers;
    allocBufferManager_->SwapHungryBuffers(hungryBuffers);
    RegionDesc* region = nullptr;
    for (auto* buffer : hungryBuffers) {
        if (buffer->GetPreparedRegion() != nullptr) {
            continue;
        }
        if (region == nullptr) {
            region = AllocateThreadLocalRegion<AllocBufferType::YOUNG>(true);
            if (region == nullptr) {
                return;
            }
        }
        if (buffer->SetPreparedRegion(region)) {
            region = nullptr;
        }
    }
    if (region != nullptr) {
        regionManager_.CollectRegion(region);
    }
}
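
// Delegates remembered-set marking to the old space and the region manager.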
void RegionSpace::MarkRememberSet(const std::function<void(BaseObject*)>& func)
{
    oldSpace_.MarkRememberSet(func);
    regionManager_.MarkRememberSet(func);
}
} // namespace common