/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_ALLOCATOR_INL_H
#define PANDA_RUNTIME_MEM_REGION_ALLOCATOR_INL_H

#include "libpandabase/mem/mem.h"
#include "libpandabase/utils/logger.h"
#include "runtime/include/runtime.h"
#include "runtime/include/thread.h"
#include "runtime/include/gc_task.h"
#include "runtime/mem/region_allocator.h"
#include "runtime/mem/region_space-inl.h"
#include "runtime/mem/runslots_allocator-inl.h"
#include "runtime/mem/freelist_allocator-inl.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/arch/memory_helpers.h"

namespace panda::mem {

template <typename LockConfigT>
RegionAllocatorBase<LockConfigT>::RegionAllocatorBase(MemStatsType *mem_stats, GenerationalSpaces *spaces,
                                                      SpaceType space_type, AllocatorType allocator_type,
                                                      size_t init_space_size, bool extend, size_t region_size)
    : mem_stats_(mem_stats),
      space_type_(space_type),
      spaces_(spaces),
      region_pool_(region_size, extend, spaces,
                   InternalAllocatorPtr(InternalAllocator<>::GetInternalAllocatorFromRuntime())),
      region_space_(space_type, allocator_type, &region_pool_),
      init_block_(0, nullptr)
{
    ASSERT(space_type_ == SpaceType::SPACE_TYPE_OBJECT || space_type_ == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT ||
           space_type_ == SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);
    init_block_ = NULLPOOL;
    if (init_space_size > 0) {
        ASSERT(init_space_size % region_size == 0);
        init_block_ = spaces_->AllocSharedPool(init_space_size, space_type, AllocatorType::REGION_ALLOCATOR, this);
        ASSERT(init_block_.GetMem() != nullptr);
        ASSERT(init_block_.GetSize() >= init_space_size);
        if (init_block_.GetMem() != nullptr) {
            region_pool_.InitRegionBlock(ToUintPtr(init_block_.GetMem()),
                                         ToUintPtr(init_block_.GetMem()) + init_space_size);
            ASAN_POISON_MEMORY_REGION(init_block_.GetMem(), init_block_.GetSize());
        }
    }
}

template <typename LockConfigT>
RegionAllocatorBase<LockConfigT>::RegionAllocatorBase(MemStatsType *mem_stats, GenerationalSpaces *spaces,
                                                      SpaceType space_type, AllocatorType allocator_type,
                                                      RegionPool *shared_region_pool)
    : mem_stats_(mem_stats),
      spaces_(spaces),
      space_type_(space_type),
      region_pool_(0, false, spaces, nullptr),  // unused
      region_space_(space_type, allocator_type, shared_region_pool),
      init_block_(0, nullptr)  // unused
{
    ASSERT(space_type_ == SpaceType::SPACE_TYPE_OBJECT || space_type_ == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
}

template <typename LockConfigT>
template <typename AllocConfigT>
Region *RegionAllocatorBase<LockConfigT>::CreateAndSetUpNewRegion(size_t region_size, RegionFlag region_type,
                                                                  RegionFlag properties)
{
    Region *region = AllocRegion(region_size, region_type, properties);
    if (LIKELY(region != nullptr)) {
        if (region_type == RegionFlag::IS_EDEN) {
            AllocConfigT::OnInitYoungRegion({region->Begin(), region->End()});
        }
        // Do a memory barrier here to make sure all threads see the references to the bitmaps.
        // The situation:
        // A mutator thread allocates a new object. During object allocation the mutator
        // allocates a new region, sets up the region header, allocates the object in the region and publishes
        // the reference to the object.
        // The GC thread does concurrent marking. It sees the reference to the new object and gets the region
        // by the object address.
        // Since the GC thread doesn't lock region_lock_, we need a memory barrier here to make
        // sure the GC thread sees all bitmaps from the region header.
        arch::FullMemoryBarrier();
        // Getting a region by object is a bit operation and TSAN doesn't
        // see the relation between region creation and region access.
        // This annotation tells TSAN that this code always executes before
        // the region will be accessed.
        // See the corresponding annotation in ObjectToRegion
        TSAN_ANNOTATE_HAPPENS_BEFORE(region);
    }
    return region;
}
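// An illustrative timeline (not part of the allocator API) of the publication protocol that the barrier in
// CreateAndSetUpNewRegion enforces; how the object reference is actually published is runtime-specific and
// only sketched here:
//
//   Mutator thread                                 GC thread (concurrent marking)
//   -------------------------------------------   -------------------------------------------
//   region = AllocRegion(...)
//   set up region header and bitmaps
//   arch::FullMemoryBarrier()
//   mem = region->Alloc(...)
//   publish the object reference              ->  read the object reference
//                                                 region = ObjectToRegion(object)
//                                                 bitmaps written before the barrier are visible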
// Collects every region currently owned by this allocator's space.
template <typename LockConfigT>
PandaVector<Region *> RegionAllocatorBase<LockConfigT>::GetAllRegions()
{
    PandaVector<Region *> vector;
    os::memory::LockHolder lock(this->region_lock_);
    GetSpace()->IterateRegions([&](Region *region) { vector.push_back(region); });
    return vector;
}

template <typename AllocConfigT, typename LockConfigT>
RegionAllocator<AllocConfigT, LockConfigT>::RegionAllocator(MemStatsType *mem_stats, GenerationalSpaces *spaces,
                                                            SpaceType space_type, size_t init_space_size, bool extend)
    : RegionAllocatorBase<LockConfigT>(mem_stats, spaces, space_type, AllocatorType::REGION_ALLOCATOR, init_space_size,
                                       extend, REGION_SIZE),
      full_region_(nullptr, 0, 0),
      eden_current_region_(&full_region_)
{
}

template <typename AllocConfigT, typename LockConfigT>
RegionAllocator<AllocConfigT, LockConfigT>::RegionAllocator(MemStatsType *mem_stats, GenerationalSpaces *spaces,
                                                            SpaceType space_type, RegionPool *shared_region_pool)
    : RegionAllocatorBase<LockConfigT>(mem_stats, spaces, space_type, AllocatorType::REGION_ALLOCATOR,
                                       shared_region_pool),
      full_region_(nullptr, 0, 0),
      eden_current_region_(&full_region_)
{
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag region_type>
void *RegionAllocator<AllocConfigT, LockConfigT>::AllocRegular(size_t align_size)
{
    static constexpr bool is_atomic = std::is_same_v<LockConfigT, RegionAllocatorLockConfig::CommonLock>;
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (region_type == RegionFlag::IS_EDEN) {
        // Lock-free fast path: bump-pointer allocation in the current eden region.
        void *mem = GetCurrentRegion()->template Alloc<is_atomic>(align_size);
        if (mem != nullptr) {
            return mem;
        }

        os::memory::LockHolder lock(this->region_lock_);
        // Re-check under the lock: another thread may have installed a fresh region already.
        mem = GetCurrentRegion()->template Alloc<is_atomic>(align_size);
        if (mem != nullptr) {
            return mem;
        }

        Region *region = this->template CreateAndSetUpNewRegion<AllocConfigT>(REGION_SIZE, region_type);
        if (LIKELY(region != nullptr)) {
            mem = region->template Alloc<is_atomic>(align_size);
            SetCurrentRegion(region);
        }
        return mem;
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (region_type == RegionFlag::IS_OLD) {
        void *mem = nullptr;
        Region *region_to = PopFromRegionQueue();
        if (region_to != nullptr) {
            mem = region_to->template Alloc<false>(align_size);
            if (mem != nullptr) {
                PushToRegionQueue(region_to);
                return mem;
            }
        }
        os::memory::LockHolder lock(this->region_lock_);
        region_to = this->template CreateAndSetUpNewRegion<AllocConfigT>(REGION_SIZE, region_type);
        if (LIKELY(region_to != nullptr)) {
            mem = region_to->template Alloc<false>(align_size);
            PushToRegionQueue(region_to);
        }
        return mem;
    }
    UNREACHABLE();
    return nullptr;
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag region_type, bool update_memstats>
void *RegionAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    ASSERT(GetAlignmentInBytes(align) % GetAlignmentInBytes(DEFAULT_ALIGNMENT) == 0);
    size_t align_size = AlignUp(size, GetAlignmentInBytes(align));
    void *mem = nullptr;
    // for movable regular-size objects, allocate from a region;
    // for nonmovable or large objects, allocate a separate large region
    if (this->GetSpaceType() != SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT &&
        LIKELY(align_size <= GetMaxRegularObjectSize())) {
        mem = AllocRegular<region_type>(align_size);
    } else {
        os::memory::LockHolder lock(this->region_lock_);
        Region *region = this->template CreateAndSetUpNewRegion<AllocConfigT>(
            Region::RegionSize(align_size, REGION_SIZE), region_type, IS_LARGE_OBJECT);
        if (LIKELY(region != nullptr)) {
            mem = region->Alloc(align_size);
        }
    }
    if (mem != nullptr) {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (update_memstats) {
            AllocConfigT::OnAlloc(align_size, this->space_type_, this->mem_stats_);
            AllocConfigT::MemoryInit(mem, size);
        }
    }
    return mem;
}
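// A hedged usage sketch (hypothetical caller, not part of this header) for the templated Alloc entry point above;
// the concrete config/lock types and the explicit template arguments are assumptions:
//
//   RegionAllocator<ObjectAllocConfig, RegionAllocatorLockConfig::CommonLock> allocator(
//       mem_stats, spaces, SpaceType::SPACE_TYPE_OBJECT, init_space_size, false);
//   // Regular-size movable object: goes through AllocRegular and the current eden region.
//   void *young = allocator.Alloc<RegionFlag::IS_EDEN, true>(object_size, DEFAULT_ALIGNMENT);
//   // Objects larger than GetMaxRegularObjectSize() get a separate large region internally.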
template <typename AllocConfigT, typename LockConfigT>
TLAB *RegionAllocator<AllocConfigT, LockConfigT>::CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread,
                                                                size_t size)
{
    ASSERT(size <= GetMaxRegularObjectSize());
    ASSERT(AlignUp(size, GetAlignmentInBytes(DEFAULT_ALIGNMENT)) == size);
    TLAB *tlab = nullptr;
    {
        os::memory::LockHolder lock(this->region_lock_);
        Region *region = nullptr;
        // first search in the partial tlab map
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (USE_PARTIAL_TLAB) {
            auto largest_tlab = retained_tlabs_.begin();
            if (largest_tlab != retained_tlabs_.end() && largest_tlab->first >= size) {
                region = largest_tlab->second;
                retained_tlabs_.erase(largest_tlab);
                LOG(DEBUG, ALLOC) << "Use retained tlabs region " << region;
                ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
            }
        }
        // allocate a free region if no partial tlab has enough space
        if (region == nullptr) {
            region = this->template CreateAndSetUpNewRegion<AllocConfigT>(REGION_SIZE, RegionFlag::IS_EDEN);
            if (LIKELY(region != nullptr)) {
                region->CreateTLABSupport();
            }
        }
        if (region != nullptr) {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (!USE_PARTIAL_TLAB) {
                // We don't reuse the same region for different TLABs.
                // Therefore, update the size
                size = region->GetRemainingSizeForTLABs();
            }
            tlab = region->CreateTLAB(size);
            ASSERT(tlab != nullptr);
            ASAN_UNPOISON_MEMORY_REGION(tlab->GetStartAddr(), tlab->GetSize());
            AllocConfigT::MemoryInit(tlab->GetStartAddr(), tlab->GetSize());
            ASAN_POISON_MEMORY_REGION(tlab->GetStartAddr(), tlab->GetSize());
            LOG(DEBUG, ALLOC) << "Found a region " << region << " and create tlab " << tlab
                              << " with memory starts at " << tlab->GetStartAddr() << " and with size "
                              << tlab->GetSize();
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (USE_PARTIAL_TLAB) {
                auto remaining_size = region->GetRemainingSizeForTLABs();
                if (remaining_size >= size) {
                    LOG(DEBUG, ALLOC) << "Add a region " << region << " with remained size " << remaining_size
                                      << " to retained_tlabs";
                    retained_tlabs_.insert(std::make_pair(remaining_size, region));
                }
            }
        }
    }
    return tlab;
}
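// A hedged usage sketch (hypothetical, not part of this header) of the TLAB fast path built on CreateNewTLAB;
// the bump-pointer Alloc call on the TLAB itself is an assumption about the TLAB interface:
//
//   size_t tlab_size = AlignUp(requested_size, GetAlignmentInBytes(DEFAULT_ALIGNMENT));
//   TLAB *tlab = allocator.CreateNewTLAB(thread, tlab_size);
//   if (tlab != nullptr) {
//       void *mem = tlab->Alloc(object_size);  // assumed thread-local bump-pointer allocation, no locking
//   }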
template <typename AllocConfigT, typename LockConfigT>
// TODO(agrebenkin) add set of flags from which to pick the regions and make it compile time
template <bool include_current_region>
PandaVector<Region *> RegionAllocator<AllocConfigT, LockConfigT>::GetTopGarbageRegions(size_t region_count)
{
    PandaPriorityQueue<std::pair<uint32_t, Region *>> queue;
    this->GetSpace()->IterateRegions([&](Region *region) {
        if (region->HasFlag(IS_EDEN)) {
            return;
        }
        if constexpr (!include_current_region) {
            if (IsInCurrentRegion(region)) {
                return;
            }
        }
        auto garbage_bytes = region->GetGarbageBytes();
        queue.push(std::pair(garbage_bytes, region));
    });
    PandaVector<Region *> regions;
    for (size_t i = 0; i < region_count && !queue.empty(); i++) {
        auto *region = queue.top().second;
        regions.push_back(region);
        queue.pop();
    }
    return regions;
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag regions_type>
PandaVector<Region *> RegionAllocator<AllocConfigT, LockConfigT>::GetAllSpecificRegions()
{
    PandaVector<Region *> vector;
    this->GetSpace()->IterateRegions([&](Region *region) {
        if (region->HasFlag(regions_type)) {
            vector.push_back(region);
        }
    });
    return vector;
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag regions_type_from, RegionFlag regions_type_to, bool use_marked_bitmap>
void RegionAllocator<AllocConfigT, LockConfigT>::CompactAllSpecificRegions(const GCObjectVisitor &death_checker,
                                                                           const ObjectVisitorEx &move_handler)
{
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (regions_type_from == regions_type_to) {  // NOLINT(bugprone-suspicious-semicolon)
        // TODO(aemelenko): Implement it if we need to call this method with the same regions type.
        // There is an issue with IterateRegions while creating a new one.
        ASSERT(regions_type_from != regions_type_to);
        ResetCurrentRegion();
    }
    this->GetSpace()->IterateRegions([&](Region *region) {
        if (!region->HasFlag(regions_type_from)) {
            return;
        }
        CompactSpecificRegion<regions_type_from, regions_type_to, use_marked_bitmap>(region, death_checker,
                                                                                     move_handler);
    });
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag regions_type_from, RegionFlag regions_type_to, bool use_marked_bitmap>
void RegionAllocator<AllocConfigT, LockConfigT>::CompactSeveralSpecificRegions(const PandaVector<Region *> &regions,
                                                                               const GCObjectVisitor &death_checker,
                                                                               const ObjectVisitorEx &move_handler)
{
    for (auto i : regions) {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (regions_type_from == regions_type_to) {
            [[maybe_unused]] bool founded_region = IsInCurrentRegion(i);
            ASSERT(!founded_region);
        }
        CompactSpecificRegion<regions_type_from, regions_type_to, use_marked_bitmap>(i, death_checker, move_handler);
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag regions_type_from, RegionFlag regions_type_to, bool use_marked_bitmap>
void RegionAllocator<AllocConfigT, LockConfigT>::CompactSpecificRegion(Region *region,
                                                                       const GCObjectVisitor &death_checker,
                                                                       const ObjectVisitorEx &move_handler)
{
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (regions_type_from == regions_type_to) {
        // It is bad if we compact one region into itself.
        [[maybe_unused]] bool is_current_region = IsInCurrentRegion(region);
        ASSERT(!is_current_region);
    }
    auto create_new_region = [&]() {
        os::memory::LockHolder lock(this->region_lock_);
        Region *region_to = this->template CreateAndSetUpNewRegion<AllocConfigT>(REGION_SIZE, regions_type_to);
        ASSERT(region_to != nullptr);
        return region_to;
    };
    Region *region_to = PopFromRegionQueue();
    if (region_to == nullptr) {
        region_to = create_new_region();
    }
    size_t live_bytes = 0;
    // Don't use atomic allocation in this method because we work with a region that is not shared
    auto visitor = [&](ObjectHeader *object) {
        if (death_checker(object) == ObjectStatus::ALIVE_OBJECT) {
            size_t object_size = GetObjectSize(object);
            size_t aligned_size = AlignUp(object_size, DEFAULT_ALIGNMENT_IN_BYTES);
            void *dst = region_to->template Alloc<false>(aligned_size);
            if (dst == nullptr) {
                region_to->SetLiveBytes(region_to->GetLiveBytes() + live_bytes);
                live_bytes = 0;
                region_to = create_new_region();
                dst = region_to->template Alloc<false>(aligned_size);
            }
            // Don't initialize memory for the object here because we will use memcpy anyway
            ASSERT(dst != nullptr);
            memcpy_s(dst, object_size, object, object_size);
            // need to mark the moved object as alive
            ASSERT(region_to->GetLiveBitmap() != nullptr);
            region_to->IncreaseAllocatedObjects();
            region_to->GetLiveBitmap()->Set(dst);
            live_bytes += aligned_size;
            move_handler(object, static_cast<ObjectHeader *>(dst));
        }
    };
    ASSERT(region->HasFlag(regions_type_from));
    const std::function<void(ObjectHeader *)> visitor_functor(visitor);
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (use_marked_bitmap) {
        // TODO(grebenkin): use live bitmap, remove CloneMarkBitmapToLiveBitmap, beware of young-regions
        region->GetMarkBitmap()->IterateOverMarkedChunks(
            [&](void *object_addr) { visitor_functor(static_cast<ObjectHeader *>(object_addr)); });
    } else {  // NOLINT(readability-misleading-indentation)
        region->IterateOverObjects(visitor_functor);
    }
    region_to->SetLiveBytes(region_to->GetLiveBytes() + live_bytes);
    PushToRegionQueue(region_to);
}
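// A hedged sketch (hypothetical GC flow, not part of this header) of how the compaction entry points above are
// typically combined; the template arguments and visitor bodies are assumptions:
//
//   auto victims = allocator.GetTopGarbageRegions<false>(regions_to_evacuate);
//   allocator.CompactSeveralSpecificRegions<RegionFlag::IS_OLD, RegionFlag::IS_OLD, true>(
//       victims, death_checker,
//       [](ObjectHeader *from, ObjectHeader *to) { /* record the forwarding address for reference fixup */ });
//   allocator.ResetSeveralSpecificRegions<RegionFlag::IS_OLD>(victims);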
template <typename AllocConfigT, typename LockConfigT>
template <bool use_marked_bitmap>
void RegionAllocator<AllocConfigT, LockConfigT>::PromoteYoungRegion(Region *region,
                                                                    const GCObjectVisitor &death_checker,
                                                                    const ObjectVisitor &alive_objects_handler)
{
    ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
    // We should do it here, because we don't create a live bitmap during young region creation
    region->CreateLiveBitmap();
    size_t live_bytes = 0;
    auto visitor = [&](ObjectHeader *object) {
        if (death_checker(object) == ObjectStatus::ALIVE_OBJECT) {
            alive_objects_handler(object);
            region->IncreaseAllocatedObjects();
            region->GetLiveBitmap()->Set(object);
            live_bytes += GetAlignedObjectSize(GetObjectSize(object));
        }
    };
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (use_marked_bitmap) {
        region->GetMarkBitmap()->IterateOverMarkedChunks(
            [&](void *object_addr) { visitor(static_cast<ObjectHeader *>(object_addr)); });
    } else {  // NOLINT(readability-misleading-indentation)
        region->IterateOverObjects(visitor);
    }
    region->SetLiveBytes(live_bytes);
    this->GetSpace()->PromoteYoungRegion(region);
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag regions_type>
void RegionAllocator<AllocConfigT, LockConfigT>::ResetAllSpecificRegions()
{
    ResetCurrentRegion();
    this->GetSpace()->IterateRegions([&](Region *region) {
        if (!region->HasFlag(regions_type)) {
            return;
        }
        this->GetSpace()->FreeRegion(region);
    });
    if constexpr (regions_type == RegionFlag::IS_EDEN) {
        retained_tlabs_.clear();
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <RegionFlag regions_type>
void RegionAllocator<AllocConfigT, LockConfigT>::ResetSeveralSpecificRegions(const PandaVector<Region *> &regions)
{
    // TODO(aemelenko): If we need to reset several young regions, we should implement it.
    ASSERT(regions_type != RegionFlag::IS_EDEN);
    ASSERT((regions_type != RegionFlag::IS_EDEN) || (retained_tlabs_.empty()));
    for (auto i : regions) {
        [[maybe_unused]] bool is_current_regions = IsInCurrentRegion(i);
        ASSERT(!is_current_regions);
        ASSERT(i->HasFlag(regions_type));
        this->GetSpace()->FreeRegion(i);
    }
}

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::RegionNonmovableAllocator(
    MemStatsType *mem_stats, GenerationalSpaces *spaces, SpaceType space_type, size_t init_space_size, bool extend)
    : RegionAllocatorBase<LockConfigT>(mem_stats, spaces, space_type, ObjectAllocator::GetAllocatorType(),
                                       init_space_size, extend, REGION_SIZE),
      object_allocator_(mem_stats, space_type)
{
}

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::RegionNonmovableAllocator(
    MemStatsType *mem_stats, GenerationalSpaces *spaces, SpaceType space_type, RegionPool *shared_region_pool)
    : RegionAllocatorBase<LockConfigT>(mem_stats, spaces, space_type, ObjectAllocator::GetAllocatorType(),
                                       shared_region_pool),
      object_allocator_(mem_stats, space_type)
{
}

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
void *RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::Alloc(size_t size, Alignment align)
{
    ASSERT(GetAlignmentInBytes(align) % GetAlignmentInBytes(DEFAULT_ALIGNMENT) == 0);
    size_t align_size = AlignUp(size, GetAlignmentInBytes(align));
    ASSERT(align_size <= ObjectAllocator::GetMaxSize());

    void *mem = object_allocator_.Alloc(align_size);
    if (UNLIKELY(mem == nullptr)) {
        mem = NewRegionAndRetryAlloc(size, align);
        if (UNLIKELY(mem == nullptr)) {
            return nullptr;
        }
    }
    auto live_bitmap = this->GetRegion(reinterpret_cast<ObjectHeader *>(mem))->GetLiveBitmap();
    ASSERT(live_bitmap != nullptr);
    live_bitmap->AtomicTestAndSet(mem);
    return mem;
}

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
void RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::Free(void *mem)
{
    this->GetRegion(reinterpret_cast<ObjectHeader *>(mem))->GetLiveBitmap()->AtomicTestAndClear(mem);
    object_allocator_.Free(mem);
}

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
void RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::Collect(
    const GCObjectVisitor &death_checker)
{
    os::memory::LockHolder lock(this->region_lock_);
    object_allocator_.Collect(death_checker);
}
template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
void RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::VisitAndRemoveFreeRegions(
    const RegionsVisitor &region_visitor)
{
    os::memory::LockHolder lock(this->region_lock_);

    // Add free regions to a vector so we don't do extra work with region_visitor
    // inside object_allocator_.
    PandaVector<Region *> free_regions;
    object_allocator_.VisitAndRemoveFreePools([&free_regions](void *mem, [[maybe_unused]] size_t size) {
        auto *region = AddrToRegion(mem);
        ASSERT(ToUintPtr(mem) + size == region->End());
        // We don't remove this region here, because we don't want to do extra work with the visitor here.
        free_regions.push_back(region);
    });

    if (!free_regions.empty()) {
        region_visitor(free_regions);
        for (auto i : free_regions) {
            this->GetSpace()->FreeRegion(i);
        }
    }
}

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
void *RegionNonmovableAllocator<AllocConfigT, LockConfigT, ObjectAllocator>::NewRegionAndRetryAlloc(size_t object_size,
                                                                                                    Alignment align)
{
    os::memory::LockHolder lock(this->region_lock_);
    size_t pool_head_size = AlignUp(Region::HeadSize(), ObjectAllocator::PoolAlign());
    ASSERT(AlignUp(pool_head_size + object_size, REGION_SIZE) == REGION_SIZE);
    while (true) {
        Region *region = this->template CreateAndSetUpNewRegion<AllocConfigT>(REGION_SIZE, RegionFlag::IS_NONMOVABLE);
        if (UNLIKELY(region == nullptr)) {
            return nullptr;
        }
        ASSERT(region->GetLiveBitmap() != nullptr);
        uintptr_t aligned_pool = ToUintPtr(region) + pool_head_size;
        bool added_memory_pool =
            object_allocator_.AddMemoryPool(ToVoidPtr(aligned_pool), REGION_SIZE - pool_head_size);
        ASSERT(added_memory_pool);
        if (UNLIKELY(!added_memory_pool)) {
            LOG(FATAL, ALLOC) << "ObjectAllocator: couldn't add memory pool to allocator";
        }
        void *mem = object_allocator_.Alloc(object_size, align);
        if (LIKELY(mem != nullptr)) {
            return mem;
        }
    }
    return nullptr;
}
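// A worked example of the layout computed in NewRegionAndRetryAlloc (illustrative numbers only): if
// REGION_SIZE is 256 KiB, Region::HeadSize() is 128 and ObjectAllocator::PoolAlign() is 256, then
// pool_head_size = AlignUp(128, 256) = 256, the pool handed to object_allocator_ starts at
// ToUintPtr(region) + 256 and spans REGION_SIZE - 256 bytes, and the ASSERT above guarantees that
// pool_head_size + object_size still fits into a single region.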
template <typename AllocConfigT, typename LockConfigT>
RegionHumongousAllocator<AllocConfigT, LockConfigT>::RegionHumongousAllocator(MemStatsType *mem_stats,
                                                                              GenerationalSpaces *spaces,
                                                                              SpaceType space_type)
    : RegionAllocatorBase<LockConfigT>(mem_stats, spaces, space_type, AllocatorType::REGION_ALLOCATOR, 0, true,
                                       REGION_SIZE)
{
}

template <typename AllocConfigT, typename LockConfigT>
template <bool update_memstats>
void *RegionHumongousAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    ASSERT(GetAlignmentInBytes(align) % GetAlignmentInBytes(DEFAULT_ALIGNMENT) == 0);
    size_t align_size = AlignUp(size, GetAlignmentInBytes(align));
    Region *region = nullptr;
    void *mem = nullptr;
    // allocate a separate large region for the object
    {
        os::memory::LockHolder lock(this->region_lock_);
        region = this->template CreateAndSetUpNewRegion<AllocConfigT>(Region::RegionSize(align_size, REGION_SIZE),
                                                                      IS_OLD, IS_LARGE_OBJECT);
        if (LIKELY(region != nullptr)) {
            mem = region->Alloc(align_size);
            ASSERT(mem != nullptr);
            ASSERT(region->GetLiveBitmap() != nullptr);
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (update_memstats) {
                AllocConfigT::OnAlloc(region->Size(), this->space_type_, this->mem_stats_);
                AllocConfigT::MemoryInit(mem, size);
            }
            // Do it after memory init because we can reach this memory after setting the live bitmap
            region->GetLiveBitmap()->AtomicTestAndSet(mem);
        }
    }
    return mem;
}

template <typename AllocConfigT, typename LockConfigT>
void RegionHumongousAllocator<AllocConfigT, LockConfigT>::CollectAndRemoveFreeRegions(
    const RegionsVisitor &region_visitor, const GCObjectVisitor &death_checker)
{
    // Add free regions to a vector so we don't do extra work with region_visitor during region iteration
    PandaVector<Region *> free_regions;
    {
        os::memory::LockHolder lock(this->region_lock_);
        this->GetSpace()->IterateRegions([&](Region *region) {
            this->Collect(region, death_checker);
            if (region->HasFlag(IS_FREE)) {
                free_regions.push_back(region);
            }
        });
    }
    if (!free_regions.empty()) {
        region_visitor(free_regions);
        for (auto i : free_regions) {
            os::memory::LockHolder lock(this->region_lock_);
            ResetRegion(i);
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
void RegionHumongousAllocator<AllocConfigT, LockConfigT>::Collect(Region *region, const GCObjectVisitor &death_checker)
{
    ASSERT(region->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    ObjectHeader *object_to_proceed = nullptr;
    object_to_proceed = region->GetLargeObject();
    if (death_checker(object_to_proceed) == ObjectStatus::DEAD_OBJECT) {
        region->AddFlag(RegionFlag::IS_FREE);
    }
}

template <typename AllocConfigT, typename LockConfigT>
void RegionHumongousAllocator<AllocConfigT, LockConfigT>::ResetRegion(Region *region)
{
    ASSERT(region->HasFlag(RegionFlag::IS_FREE));
    region->RmvFlag(RegionFlag::IS_FREE);
    this->GetSpace()->FreeRegion(region);
}

template <typename AllocConfigT, typename LockConfigT>
using RegionRunslotsAllocator = RegionNonmovableAllocator<AllocConfigT, LockConfigT, RunSlotsAllocator<AllocConfigT>>;

template <typename AllocConfigT, typename LockConfigT>
using RegionFreeListAllocator = RegionNonmovableAllocator<AllocConfigT, LockConfigT, FreeListAllocator<AllocConfigT>>;

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_MEM_REGION_ALLOCATOR_INL_H