/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMMON_COMPONENTS_HEAP_ALLOCATOR_REGION_MANAGER_H
#define COMMON_COMPONENTS_HEAP_ALLOCATOR_REGION_MANAGER_H

#include <atomic>
#include <functional>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stack>
#include <thread>
#include <vector>

#include "common_components/common/run_type.h"
#include "common_components/heap/allocator/alloc_buffer.h"
#include "common_components/heap/allocator/allocator.h"
#include "common_components/heap/allocator/free_region_manager.h"
#include "common_components/heap/allocator/region_list.h"
#include "common_components/heap/allocator/fix_heap.h"
#include "common_components/heap/allocator/slot_list.h"
#include "common_components/common_runtime/hooks.h"

namespace common {
using JitFortUnProtHookType = void (*)(size_t size, void* base);

class MarkingCollector;
class CompactCollector;
class RegionManager;
class Taskpool;
// RegionManager needs to know header size and alignment in order to iterate objects linearly,
// and thus its Alloc should be rewritten as AllocObj(objSize).
class RegionManager {
public:
    constexpr static size_t FIXED_PINNED_REGION_COUNT = 128;
    constexpr static size_t FIXED_PINNED_THRESHOLD = sizeof(uint64_t) * FIXED_PINNED_REGION_COUNT;
    /* region memory layout:
        1. some padding to make the heap start aligned
        2. region info for each region, part of heap metadata
        3. region space for allocation, i.e., the heap --- start address is aligned to `RegionDesc::UNIT_SIZE`
    */
    static size_t GetHeapMemorySize(size_t heapSize)
    {
        size_t regionNum = GetHeapUnitCount(heapSize);
        size_t metadataSize = GetMetadataSize(regionNum);
        // Add one more `RegionDesc::UNIT_SIZE` to totalSize: the region start address must be aligned to
        // `RegionDesc::UNIT_SIZE`, which requires some padding.
        size_t totalSize = metadataSize + RoundUp<size_t>(heapSize, RegionDesc::UNIT_SIZE) + RegionDesc::UNIT_SIZE;
        return totalSize;
    }

    static size_t GetHeapUnitCount(size_t heapSize)
    {
        heapSize = RoundUp<size_t>(heapSize, RegionDesc::UNIT_SIZE);
        size_t regionNum = heapSize / RegionDesc::UNIT_SIZE;
        return regionNum;
    }

    static size_t GetMetadataSize(size_t regionNum)
    {
        size_t metadataSize = regionNum * sizeof(RegionDesc);
        return RoundUp<size_t>(metadataSize, COMMON_PAGE_SIZE);
    }
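    // Illustrative sizing walk-through (a sketch only; the concrete values of RegionDesc::UNIT_SIZE,
    // sizeof(RegionDesc) and COMMON_PAGE_SIZE below are assumed for the example, not taken from this codebase):
    //   assume UNIT_SIZE = 256 KB, sizeof(RegionDesc) = 128 B, COMMON_PAGE_SIZE = 4 KB, heapSize = 64 MB
    //   GetHeapUnitCount(64 MB)  = RoundUp(64 MB, 256 KB) / 256 KB         = 256 units
    //   GetMetadataSize(256)     = RoundUp(256 * 128 B, 4 KB)              = 32 KB
    //   GetHeapMemorySize(64 MB) = 32 KB + RoundUp(64 MB, 256 KB) + 256 KB = 64 MB + 288 KB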
    void CollectFixTasks(FixHeapTaskList& taskList);
    void CollectFixHeapTaskForPinnedRegion(MarkingCollector& collector, RegionList& list, FixHeapTaskList& taskList);

    void Initialize(size_t regionNum, uintptr_t regionInfoStart);

    RegionManager()
        : freeRegionManager_(*this), garbageRegionList_("garbage regions"),
          pinnedRegionList_("pinned regions"), recentPinnedRegionList_("recent pinned regions"),
          rawPointerRegionList_("raw pointer pinned regions"), largeRegionList_("large regions"),
          recentLargeRegionList_("recent large regions"), readOnlyRegionList_("read only region"),
          appSpawnRegionList_("appSpawn regions")
    {
        for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) {
            recentFixedPinnedRegionList_[i] = new RegionList("fixed recent pinned regions");
            fixedPinnedRegionList_[i] = new RegionList("fixed pinned regions");
        }
    }

    RegionManager(const RegionManager&) = delete;

    RegionManager& operator=(const RegionManager&) = delete;

    void FixFixedRegionList(MarkingCollector& collector, RegionList& list, size_t cellCount, GCStats& stats);

    using RootSet = MarkStack<BaseObject*>;

#if defined(GCINFO_DEBUG) && GCINFO_DEBUG
    void DumpRegionDesc() const;
#endif

    void DumpRegionStats() const;

    uintptr_t GetInactiveZone() const { return inactiveZone_; }

    uintptr_t GetRegionHeapStart() const { return regionHeapStart_; }

    RegionDesc* GetFirstRegion() const
    {
        if (regionHeapStart_ < inactiveZone_) {
            return RegionDesc::GetRegionDescAt(regionHeapStart_);
        }
        return nullptr;
    }

    ~RegionManager()
    {
        for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) {
            if (recentFixedPinnedRegionList_[i] != nullptr) {
                delete recentFixedPinnedRegionList_[i];
                recentFixedPinnedRegionList_[i] = nullptr;
            }
            if (fixedPinnedRegionList_[i] != nullptr) {
                delete fixedPinnedRegionList_[i];
                fixedPinnedRegionList_[i] = nullptr;
            }
        }
    }

    // take a region with *num* units for allocation
    RegionDesc* TakeRegion(size_t num, RegionDesc::UnitRole, bool expectPhysicalMem = false, bool allowgc = true,
                           bool isCopy = false);

    RegionDesc* TakeRegion(bool expectPhysicalMem, bool allowgc, bool isCopy = false)
    {
        return TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, expectPhysicalMem, allowgc, isCopy);
    }
    void AddRecentPinnedRegion(RegionDesc* region)
    {
        recentPinnedRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_PINNED_REGION);
    }

    uintptr_t AllocPinnedFromFreeList(size_t size);

    uintptr_t AllocReadOnly(size_t size, bool allowGC = true);

    uintptr_t AllocPinned(size_t size, bool allowGC = true)
    {
        uintptr_t addr = 0;
        if (!allowGC || size > FIXED_PINNED_THRESHOLD) {
            DLOG(ALLOC, "alloc pinned obj 0x%zx(%zu)", addr, size);
            return AllocNextFitPinned(size);
        }
        CHECK_CC(size % sizeof(uint64_t) == 0);
        size_t cellCount = size / sizeof(uint64_t) - 1;
        RegionList* list = recentFixedPinnedRegionList_[cellCount];
        std::mutex& listMutex = list->GetListMutex();
        listMutex.lock();
        RegionDesc* headRegion = list->GetHeadRegion();
        if (headRegion != nullptr) {
            addr = headRegion->Alloc(size);
        }
        if (addr == 0) {
            addr = AllocPinnedFromFreeList(cellCount);
        }
        if (addr == 0) {
            RegionDesc* region =
                TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC);
            if (region == nullptr) {
                listMutex.unlock();
                return 0;
            }
            DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(),
                 region->GetRegionAllocatedSize(),
                 region->GetRegionType());
            ASSERT(cellCount == static_cast<size_t>(static_cast<uint8_t>(cellCount)));
            region->SetRegionCellCount(static_cast<uint8_t>(cellCount));
            GCPhase phase = Mutator::GetMutator()->GetMutatorPhase();
            if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK ||
                phase == GC_PHASE_REMARK_SATB || phase == GC_PHASE_POST_MARK) {
                region->SetMarkingLine();
            } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) {
                region->SetMarkingLine();
                region->SetCopyLine();
            }
            // To keep the allocated size consistent, the region must be prepended to the list
            // before the object is allocated from it.
            list->PrependRegionLocked(region, RegionDesc::RegionType::FIXED_PINNED_REGION);
            addr = region->Alloc(size);
        }
        DLOG(ALLOC, "alloc pinned obj 0x%zx(%zu)", addr, size);
        listMutex.unlock();
        return addr;
    }
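    // Bucketing note (derived from the code above, not from external documentation): a pinned
    // allocation of `size` bytes (a multiple of sizeof(uint64_t), at most FIXED_PINNED_THRESHOLD
    // = 8 * 128 = 1024 bytes) is served from the size-class list at index
    //     cellCount = size / sizeof(uint64_t) - 1
    // e.g. size = 64 bytes -> cellCount = 7, so it uses recentFixedPinnedRegionList_[7];
    // larger or GC-forbidden requests fall back to AllocNextFitPinned() below.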
    uintptr_t AllocNextFitPinned(size_t size, bool allowGC = true)
    {
        uintptr_t addr = 0;
        std::mutex& regionListMutex = recentPinnedRegionList_.GetListMutex();

        std::lock_guard<std::mutex> lock(regionListMutex);
        RegionDesc* headRegion = recentPinnedRegionList_.GetHeadRegion();
        if (headRegion != nullptr) {
            addr = headRegion->Alloc(size);
        }
        if (addr == 0) {
            RegionDesc* region =
                TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC);
            if (region == nullptr) {
                return 0;
            }
            DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(),
                 region->GetRegionAllocatedSize(),
                 region->GetRegionType());
            GCPhase phase = Mutator::GetMutator()->GetMutatorPhase();
            if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB ||
                phase == GC_PHASE_POST_MARK) {
                region->SetMarkingLine();
            } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) {
                region->SetMarkingLine();
                region->SetCopyLine();
            }

            // To keep the allocated size consistent, the region must be prepended to the list
            // before the object is allocated from it.
            recentPinnedRegionList_.PrependRegionLocked(region, RegionDesc::RegionType::RECENT_PINNED_REGION);
            addr = region->Alloc(size);
        }

        DLOG(ALLOC, "alloc pinned obj 0x%zx(%zu)", addr, size);
        return addr;
    }

    // Note: AllocSmall() is always performed on a region owned by the mutator thread,
    // so it does not need to go through RegionManager.
    // The caller guarantees that size is truly large (> region size).
    uintptr_t AllocLarge(size_t size, bool allowGC = true)
    {
        size_t alignedSize = AlignUp<size_t>(size + RegionDesc::UNIT_HEADER_SIZE, RegionDesc::UNIT_SIZE);
        size_t regionCount = alignedSize / RegionDesc::UNIT_SIZE;
        RegionDesc* region = TakeRegion(regionCount, RegionDesc::UnitRole::LARGE_SIZED_UNITS, false, allowGC);
        if (region == nullptr) {
            return 0;
        }
        GCPhase phase = Mutator::GetMutator()->GetMutatorPhase();
        if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB ||
            phase == GC_PHASE_POST_MARK) {
            region->SetMarkingLine();
        } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) {
            region->SetMarkingLine();
            region->SetCopyLine();
        }

        DLOG(REGION, "alloc large region @0x%zx+%zu type %u", region->GetRegionStart(),
             region->GetRegionSize(), region->GetRegionType());
        uintptr_t addr = region->Alloc(size);
        ASSERT(addr > 0);
        recentLargeRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_LARGE_REGION);
        return addr;
    }
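    // Sizing sketch for AllocLarge (the UNIT_SIZE / UNIT_HEADER_SIZE values are assumed for
    // illustration only): with UNIT_SIZE = 256 KB and UNIT_HEADER_SIZE = 1 KB, a 600 KB request gives
    //     alignedSize = AlignUp(600 KB + 1 KB, 256 KB) = 768 KB, regionCount = 768 KB / 256 KB = 3,
    // so TakeRegion() is asked for 3 contiguous large-sized units to host the single object.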
    void CountLiveObject(const BaseObject* obj);

    void AssembleLargeGarbageCandidates();
    void AssemblePinnedGarbageCandidates();

    void ReassembleAppspawnSpace(RegionList& regionList)
    {
        appSpawnRegionList_.MergeRegionList(regionList, RegionDesc::RegionType::APPSPAWN_REGION);
    }

    void CollectFromSpaceGarbage(RegionList& fromList)
    {
        garbageRegionList_.MergeRegionList(fromList, RegionDesc::RegionType::GARBAGE_REGION);
    }

    void AddRawPointerRegion(RegionDesc* region)
    {
        rawPointerRegionList_.PrependRegion(region, RegionDesc::RegionType::RAW_POINTER_REGION);
    }

    size_t CollectRegion(RegionDesc* region)
    {
        DLOG(REGION, "collect region %p@%#zx+%zu type %u", region, region->GetRegionStart(),
             region->GetLiveByteCount(), region->GetRegionType());

#ifdef USE_HWASAN
        ASAN_POISON_MEMORY_REGION(reinterpret_cast<const volatile void *>(region->GetRegionBase()),
                                  region->GetRegionBaseSize());
        const uintptr_t p_addr = region->GetRegionBase();
        const uintptr_t p_size = region->GetRegionBaseSize();
        LOG_COMMON(DEBUG) << std::hex << "set [" << p_addr <<
            std::hex << ", " << p_addr + p_size << ") poisoned\n";
#endif
        garbageRegionList_.PrependRegion(region, RegionDesc::RegionType::GARBAGE_REGION);
        if (region->IsLargeRegion()) {
            return region->GetRegionSize();
        } else {
            return region->GetRegionSize() - region->GetLiveByteCount();
        }
    }

    void ReclaimRegion(RegionDesc* region);
    size_t ReleaseRegion(RegionDesc* region);

    void ReclaimGarbageRegions()
    {
        RegionDesc* garbage = garbageRegionList_.TakeHeadRegion();
        while (garbage != nullptr) {
            ReclaimRegion(garbage);
            garbage = garbageRegionList_.TakeHeadRegion();
        }
    }

    size_t CollectLargeGarbage();

    // targetSize: the amount of memory that is not released but kept as a cache for future allocation.
    size_t ReleaseGarbageRegions(size_t targetSize) { return freeRegionManager_.ReleaseGarbageRegions(targetSize); }

    void ForEachObjectUnsafe(const std::function<void(BaseObject*)>& visitor) const;
    void ForEachObjectSafe(const std::function<void(BaseObject*)>& visitor) const;
    void ForEachAwaitingJitFortUnsafe(const std::function<void(BaseObject*)>& visitor) const;

    size_t GetRecentAllocatedSize() const
    {
        return recentLargeRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize();
    }

    size_t GetSurvivedSize() const
    {
        return pinnedRegionList_.GetAllocatedSize() + largeRegionList_.GetAllocatedSize();
    }

    size_t GetUsedUnitCount() const
    {
        return largeRegionList_.GetUnitCount() + recentLargeRegionList_.GetUnitCount() +
            pinnedRegionList_.GetUnitCount() + recentPinnedRegionList_.GetUnitCount() +
            rawPointerRegionList_.GetUnitCount() + readOnlyRegionList_.GetUnitCount() +
            appSpawnRegionList_.GetUnitCount();
    }

    size_t GetDirtyUnitCount() const { return freeRegionManager_.GetDirtyUnitCount(); }

    size_t GetInactiveUnitCount() const { return (regionHeapEnd_ - inactiveZone_) / RegionDesc::UNIT_SIZE; }

    size_t GetActiveSize() const { return inactiveZone_ - regionHeapStart_; }

    inline size_t GetLargeObjectSize() const
    {
        return largeRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize();
    }

    size_t GetAllocatedSize() const
    {
        return largeRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize() +
            GetPinnedSpaceSize() + rawPointerRegionList_.GetAllocatedSize() +
            readOnlyRegionList_.GetAllocatedSize() + appSpawnRegionList_.GetAllocatedSize();
    }

    inline size_t GetPinnedSpaceSize() const
    {
        size_t pinnedSpaceSize =
            pinnedRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize();
        for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) {
            pinnedSpaceSize += recentFixedPinnedRegionList_[i]->GetAllocatedSize();
            pinnedSpaceSize += fixedPinnedRegionList_[i]->GetAllocatedSize();
        }
        return pinnedSpaceSize;
    }

    RegionDesc* GetNextNeighborRegion(RegionDesc* region) const
    {
        HeapAddress address = region->GetRegionEnd();
        if (address < inactiveZone_.load()) {
            return RegionDesc::GetRegionDescAt(address);
        }
        return nullptr;
    }
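    // Linear iteration sketch over the active part of the heap, built only from
    // GetFirstRegion()/GetNextNeighborRegion() (illustrative; callers that need object-level
    // traversal should prefer ForEachObjectSafe/ForEachObjectUnsafe):
    //     for (RegionDesc* r = GetFirstRegion(); r != nullptr; r = GetNextNeighborRegion(r)) {
    //         /* visit r */
    //     }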
    // This method checks whether allocation is currently permitted; otherwise the allocation is
    // suspended until it does no harm to GC.
    void RequestForRegion(size_t size);

    void PrepareMarking()
    {
        AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) {
            RegionDesc* region = regionBuffer.GetRegion<AllocBufferType::YOUNG>();
            if (region != RegionDesc::NullRegion()) {
                region->SetMarkingLine();
            }
            region = regionBuffer.GetRegion<AllocBufferType::OLD>();
            if (region != RegionDesc::NullRegion()) {
                region->SetMarkingLine();
            }
        };
        Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor);

        RegionDesc* pinRegion = recentPinnedRegionList_.GetHeadRegion();
        if (pinRegion != nullptr && pinRegion != RegionDesc::NullRegion()) {
            pinRegion->SetMarkingLine();
        }

        RegionDesc* readOnlyRegion = readOnlyRegionList_.GetHeadRegion();
        if (readOnlyRegion != nullptr && readOnlyRegion != RegionDesc::NullRegion()) {
            readOnlyRegion->SetMarkingLine();
        }

        for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) {
            RegionDesc* region = recentFixedPinnedRegionList_[i]->GetHeadRegion();
            if (region != nullptr && region != RegionDesc::NullRegion()) {
                region->SetMarkingLine();
            }
        }
    }

    void PrepareForward()
    {
        AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) {
            RegionDesc* region = regionBuffer.GetRegion<AllocBufferType::YOUNG>();
            if (region != RegionDesc::NullRegion()) {
                region->SetCopyLine();
            }
            region = regionBuffer.GetRegion<AllocBufferType::OLD>();
            if (region != RegionDesc::NullRegion()) {
                region->SetCopyLine();
            }
        };
        Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor);

        RegionDesc* pinRegion = recentPinnedRegionList_.GetHeadRegion();
        if (pinRegion != nullptr && pinRegion != RegionDesc::NullRegion()) {
            pinRegion->SetCopyLine();
        }

        RegionDesc* readOnlyRegion = readOnlyRegionList_.GetHeadRegion();
        if (readOnlyRegion != nullptr && readOnlyRegion != RegionDesc::NullRegion()) {
            readOnlyRegion->SetCopyLine();
        }

        for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) {
            RegionDesc* region = recentFixedPinnedRegionList_[i]->GetHeadRegion();
            if (region != nullptr && region != RegionDesc::NullRegion()) {
                region->SetCopyLine();
            }
        }
    }

    void ClearAllGCInfo()
    {
        ClearGCInfo(largeRegionList_);
        ClearGCInfo(recentLargeRegionList_);
        ClearGCInfo(recentPinnedRegionList_);
        ClearGCInfo(rawPointerRegionList_);
        ClearGCInfo(pinnedRegionList_);
        ClearGCInfo(readOnlyRegionList_);
        ClearGCInfo(appSpawnRegionList_);
        for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) {
            ClearGCInfo(*recentFixedPinnedRegionList_[i]);
            ClearGCInfo(*fixedPinnedRegionList_[i]);
        }
    }

    void SetReadOnlyToRORegionList()
    {
        auto visitor = [](RegionDesc* region) {
            if (region != nullptr) {
                region->SetReadOnly();
            }
        };
        readOnlyRegionList_.VisitAllRegions(visitor);
    }

    void ClearReadOnlyFromRORegionList()
    {
        auto visitor = [](RegionDesc* region) {
            if (region != nullptr) {
                region->ClearReadOnly();
            }
        };
        readOnlyRegionList_.VisitAllRegions(visitor);
    }

    void MarkRememberSet(const std::function<void(BaseObject*)>& func);
    void ClearRSet();
    void MarkJitFortMemInstalled(void *thread, BaseObject *obj)
    {
        std::lock_guard guard(awaitingJitFortMutex_);
        // GC is running; mark the JitFort memory as installed after GC finishes.
        if (Heap::GetHeap().GetGCPhase() != GCPhase::GC_PHASE_IDLE) {
            jitFortPostGCInstallTask_.emplace(nullptr, obj);
        } else {
            // a thread-local JitFort memory
            if (thread) {
                MarkThreadLocalJitFortInstalled(thread, obj);
            } else {
                RegionDesc::GetAliveRegionDescAt(reinterpret_cast<uintptr_t>(obj))->SetJitFortAwaitInstallFlag(false);
            }
            awaitingJitFort_.erase(obj);
        }
    }

    void MarkJitFortMemAwaitingInstall(BaseObject *obj)
    {
        std::lock_guard guard(awaitingJitFortMutex_);
        RegionDesc::GetAliveRegionDescAt(reinterpret_cast<uintptr_t>(obj))->SetJitFortAwaitInstallFlag(true);
        awaitingJitFort_.insert(obj);
    }

    void HandlePostGCJitFortInstallTask()
    {
        ASSERT(Heap::GetHeap().GetGCPhase() == GCPhase::GC_PHASE_IDLE);
        while (!jitFortPostGCInstallTask_.empty()) {
            auto [thread, machineCode] = jitFortPostGCInstallTask_.top();
            MarkJitFortMemInstalled(thread, machineCode);
            jitFortPostGCInstallTask_.pop();
        }
    }
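    // Typical lifecycle of the hooks above (a sketch inferred from this header; the call sites
    // live elsewhere in the runtime):
    //   1. MarkJitFortMemAwaitingInstall(obj)    -- keep the JitFort object alive while the JIT is
    //                                               still installing code into it;
    //   2. MarkJitFortMemInstalled(thread, obj)  -- installation done; if a GC is in progress the
    //                                               work is queued in jitFortPostGCInstallTask_;
    //   3. HandlePostGCJitFortInstallTask()      -- drains that queue once the GC phase is idle.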
private:
    inline void TagHugePage(RegionDesc* region, size_t num) const;
    inline void UntagHugePage(RegionDesc* region, size_t num) const;

    void ClearGCInfo(RegionList& list)
    {
        RegionList tmp("temp region list");
        list.CopyListTo(tmp);
        tmp.VisitAllRegions([](RegionDesc* region) {
            region->ClearMarkingCopyLine();
            region->ClearLiveInfo();
            region->ResetMarkBit();
        });
    }

    FreeRegionManager freeRegionManager_;

    // Region lists represent the life cycle of regions.
    // Each region must belong to exactly one list at any time.

    // cache for fromRegionList after forwarding.
    RegionList garbageRegionList_;

    // regions for small-sized objects which are not movable.
    RegionList pinnedRegionList_;
    RegionList* fixedPinnedRegionList_[FIXED_PINNED_REGION_COUNT];

    // regions allocated since the last GC began.
    // pinned regions are recorded here first and moved to the pinned/fixedPinned
    // region lists when GC starts.
    RegionList recentPinnedRegionList_;
    RegionList* recentFixedPinnedRegionList_[FIXED_PINNED_REGION_COUNT];

    // region lists for small-sized raw-pointer objects (e.g. future, monitor)
    // which can never be moved (even during compaction).
    RegionList rawPointerRegionList_; // delete rawPointerRegion, use PinnedRegion

    // regions for large-sized objects.
    // a large region is recorded here after the large object is allocated.
    RegionList largeRegionList_;

    // large regions allocated since the last GC began;
    // recorded here first and moved when GC starts.
    RegionList recentLargeRegionList_;

    // regions for read only objects
    RegionList readOnlyRegionList_;

    // regions for the appspawn space.
    RegionList appSpawnRegionList_;

    uintptr_t regionInfoStart_ = 0; // the address of the first RegionDesc

    uintptr_t regionHeapStart_ = 0; // the address of the first region used to allocate objects
    uintptr_t regionHeapEnd_ = 0;

    // the time when the previous region was allocated, assigned from the return value of timeutil::NanoSeconds().
    std::atomic<uint64_t> prevRegionAllocTime_ = { 0 };

    // heap space that has never been allocated even once; this value should not decrease.
    std::atomic<uintptr_t> inactiveZone_ = { 0 };
    // An awaiting JitFort object has no references from other objects, but it must be kept alive
    // until JIT compilation has finished installing it.
    std::set<BaseObject*> awaitingJitFort_;
    std::stack<std::pair<void*, BaseObject*>> jitFortPostGCInstallTask_;
    std::mutex awaitingJitFortMutex_;

    friend class VerifyIterator;
};
} // namespace common

#endif // COMMON_COMPONENTS_HEAP_ALLOCATOR_REGION_MANAGER_H