/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "mem/mem_pool.h"
#include "runtime/mem/region_space-inl.h"
#include "runtime/mem/rem_set-inl.h"
#include "runtime/include/runtime.h"
#include "runtime/include/panda_vm.h"

namespace panda::mem {

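// Returns the number of bytes allocated in this region: the bump-pointer distance (top_ - begin_)
// for an ordinary region, or the sum of the occupied sizes of all TLABs for a TLAB region.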
uint32_t Region::GetAllocatedBytes() const
{
    if (!IsTLAB()) {
        return top_ - begin_;
    }
    uint32_t allocatedBytes = 0;
    ASSERT(tlabVector_ != nullptr);
    for (auto i : *tlabVector_) {
        allocatedBytes += i->GetOccupiedSize();
    }
    return allocatedBytes;
}

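// Returns the unallocated fraction of the region in [0, 1]:
// fragmentation = (Size() - GetAllocatedBytes()) / Size()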
double Region::GetFragmentation() const
{
    ASSERT(Size() >= GetAllocatedBytes());
    return static_cast<double>(Size() - GetAllocatedBytes()) / Size();
}

InternalAllocatorPtr Region::GetInternalAllocator()
{
    return space_->GetPool()->GetInternalAllocator();
}

void Region::CreateRemSet()
{
    ASSERT(remSet_ == nullptr);
    remSet_ = GetInternalAllocator()->New<RemSetT>();
}

void Region::SetupAtomics()
{
    liveBytes_ = GetInternalAllocator()->New<std::atomic<uint32_t>>();
    pinnedObjects_ = GetInternalAllocator()->New<std::atomic<uint32_t>>();
}

void Region::CreateTLABSupport()
{
    ASSERT(tlabVector_ == nullptr);
    tlabVector_ = GetInternalAllocator()->New<PandaVector<TLAB *>>(GetInternalAllocator()->Adapter());
}

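// Returns how many bytes at the end of the region are still available for new TLABs.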
size_t Region::GetRemainingSizeForTLABs() const
{
    ASSERT(IsTLAB());
    // TLABs are laid out one after another, so the free space starts right after the last TLAB.
    uintptr_t lastTlabEndByte = tlabVector_->empty() ? Top() : ToUintPtr(tlabVector_->back()->GetEndAddr());
    ASSERT((lastTlabEndByte <= End()) && (lastTlabEndByte >= Top()));
    return End() - lastTlabEndByte;
}

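// Carves a new TLAB of `size` bytes off the unused tail of the region; the TLAB header itself is
// allocated from the internal allocator. Returns nullptr when fewer than `size` bytes remain, so
// callers must handle failure, e.g. (sketch): if (TLAB *t = region->CreateTLAB(size)) { ... }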
TLAB *Region::CreateTLAB(size_t size)
{
    ASSERT(IsTLAB());
    ASSERT(Begin() != 0);
    ASSERT(Top() == Begin());
    size_t remainingSize = GetRemainingSizeForTLABs();
    if (remainingSize < size) {
        return nullptr;
    }
    ASSERT(End() > remainingSize);
    TLAB *tlab = GetInternalAllocator()->New<TLAB>(ToVoidPtr(End() - remainingSize), size);
    tlabVector_->push_back(tlab);
    return tlab;
}

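// Allocates and clears a mark bitmap covering the whole region; its backing storage comes from
// the internal allocator and is freed in Region::Destroy().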
MarkBitmap *Region::CreateMarkBitmap()
{
    ASSERT(markBitmap_ == nullptr);
    auto allocator = GetInternalAllocator();
    auto bitmapData = allocator->Alloc(MarkBitmap::GetBitMapSizeInByte(Size()));
    ASSERT(bitmapData != nullptr);
    markBitmap_ = allocator->New<MarkBitmap>(this, Size(), bitmapData);
    ASSERT(markBitmap_ != nullptr);
    markBitmap_->ClearAllBits();
    return markBitmap_;
}

MarkBitmap *Region::CreateLiveBitmap()
{
    ASSERT(liveBitmap_ == nullptr);
    auto allocator = GetInternalAllocator();
    auto bitmapData = allocator->Alloc(MarkBitmap::GetBitMapSizeInByte(Size()));
    ASSERT(bitmapData != nullptr);
    liveBitmap_ = allocator->New<MarkBitmap>(this, Size(), bitmapData);
    ASSERT(liveBitmap_ != nullptr);
    liveBitmap_->ClearAllBits();
    return liveBitmap_;
}

void Region::SetMarkBit(ObjectHeader *object)
{
    ASSERT(IsInRange(object));
    markBitmap_->Set(object);
}

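// Sums the aligned sizes of all objects marked in the live bitmap
// (CalcMarkBytes below does the same over the mark bitmap).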
uint32_t Region::CalcLiveBytes() const
{
    ASSERT(liveBitmap_ != nullptr);
    uint32_t liveBytes = 0;
    liveBitmap_->IterateOverMarkedChunks<true>(
        [&liveBytes](const void *object) { liveBytes += GetAlignedObjectSize(GetObjectSize(object)); });
    return liveBytes;
}

uint32_t Region::CalcMarkBytes() const
{
    ASSERT(markBitmap_ != nullptr);
    uint32_t markBytes = 0;
    markBitmap_->IterateOverMarkedChunks(
        [&markBytes](const void *object) { markBytes += GetAlignedObjectSize(GetObjectSize(object)); });
    return markBytes;
}

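// Releases every auxiliary structure owned by the region (remembered set, atomic counters,
// TLAB headers, bitmaps and their backing storage). The region memory itself is not freed here.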
void Region::Destroy()
{
    auto allocator = GetInternalAllocator();
    if (remSet_ != nullptr) {
        allocator->Delete(remSet_);
        remSet_ = nullptr;
    }
    if (liveBytes_ != nullptr) {
        allocator->Delete(liveBytes_);
        liveBytes_ = nullptr;
    }
    if (pinnedObjects_ != nullptr) {
        allocator->Delete(pinnedObjects_);
        pinnedObjects_ = nullptr;
    }
    if (tlabVector_ != nullptr) {
        for (auto i : *tlabVector_) {
            allocator->Delete(i);
        }
        allocator->Delete(tlabVector_);
        tlabVector_ = nullptr;
    }
    if (liveBitmap_ != nullptr) {
        allocator->Delete(liveBitmap_->GetBitMap().data());
        allocator->Delete(liveBitmap_);
        liveBitmap_ = nullptr;
    }
    if (markBitmap_ != nullptr) {
        allocator->Delete(markBitmap_->GetBitMap().data());
        allocator->Delete(markBitmap_);
        markBitmap_ = nullptr;
    }
}

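// Initializes the block over [regionsBegin, regionsEnd): allocates and zeroes the occupancy
// table, one slot per region-sized chunk.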
void RegionBlock::Init(uintptr_t regionsBegin, uintptr_t regionsEnd)
{
    os::memory::LockHolder lock(lock_);
    ASSERT(occupied_.Empty());
    ASSERT(Region::IsAlignment(regionsBegin, regionSize_));
    ASSERT((regionsEnd - regionsBegin) % regionSize_ == 0);
    size_t numRegions = (regionsEnd - regionsBegin) / regionSize_;
    if (numRegions > 0) {
        size_t size = numRegions * sizeof(Region *);
        auto data = reinterpret_cast<Region **>(allocator_->Alloc(size));
        memset_s(data, size, 0, size);
        occupied_ = Span<Region *>(data, numRegions);
        regionsBegin_ = regionsBegin;
        regionsEnd_ = regionsEnd;
    }
}

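// Linearly scans the occupancy table for a free slot and marks it as used;
// returns nullptr if every region in the block is occupied.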
Region *RegionBlock::AllocRegion()
{
    os::memory::LockHolder lock(lock_);
    // NOTE(yxr) : find an unused region, improve it
    for (size_t i = 0; i < occupied_.Size(); ++i) {
        if (occupied_[i] == nullptr) {
            auto *region = RegionAt(i);
            occupied_[i] = region;
            numUsedRegions_++;
            return region;
        }
    }
    return nullptr;
}

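// Finds largeRegionSize / regionSize_ contiguous free slots with a sliding-window scan: whenever
// the window hits an occupied slot, the search restarts just past it. Every slot of the found run
// is marked with the same head region.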
Region *RegionBlock::AllocLargeRegion(size_t largeRegionSize)
{
    os::memory::LockHolder lock(lock_);
    // NOTE(yxr) : search for contiguous unused regions, improve it
    size_t allocRegionNum = largeRegionSize / regionSize_;
    size_t left = 0;
    while (left + allocRegionNum <= occupied_.Size()) {
        bool found = true;
        size_t right = left;
        while (right < left + allocRegionNum) {
            if (occupied_[right] != nullptr) {
                found = false;
                break;
            }
            ++right;
        }
        if (found) {
            // mark those regions as 'used'
            auto *region = RegionAt(left);
            for (size_t i = 0; i < allocRegionNum; i++) {
                occupied_[left + i] = region;
            }
            numUsedRegions_ += allocRegionNum;
            return region;
        }
        // restart the search just past the occupied slot
        left = right + 1;
    }
    return nullptr;
}

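// Clears every occupancy slot covered by the region (a large region spans several slots) and
// optionally returns its pages to the OS.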
void RegionBlock::FreeRegion(Region *region, bool releasePages)
{
    os::memory::LockHolder lock(lock_);
    size_t regionIdx = RegionIndex(region);
    size_t regionNum = region->Size() / regionSize_;
    ASSERT(regionIdx + regionNum <= occupied_.Size());
    for (size_t i = 0; i < regionNum; i++) {
        ASSERT(occupied_[regionIdx + i] == region);
        occupied_[regionIdx + i] = nullptr;
    }
    numUsedRegions_ -= regionNum;
    if (releasePages) {
        os::mem::ReleasePages(ToUintPtr(region), region->End());
    }
}

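// Allocates a new region of regionSize bytes: first from the pre-allocated region block, then,
// if that fails and extension is enabled, by mmapping a fresh pool. Returns nullptr when the
// space limits would be exceeded.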
Region *RegionPool::NewRegion(RegionSpace *space, SpaceType spaceType, AllocatorType allocatorType, size_t regionSize,
                              RegionFlag edenOrOldOrNonmovable, RegionFlag properties, OSPagesAllocPolicy allocPolicy)
{
    // Check that the input regionSize is aligned
    ASSERT(regionSize % regionSize_ == 0);
    ASSERT(IsYoungRegionFlag(edenOrOldOrNonmovable) || edenOrOldOrNonmovable == RegionFlag::IS_OLD ||
           edenOrOldOrNonmovable == RegionFlag::IS_NONMOVABLE);

    // Leave enough space so that there are always some free regions in the heap which we can use for full GC
    if (edenOrOldOrNonmovable == RegionFlag::IS_NONMOVABLE || regionSize > regionSize_) {
        if (!spaces_->CanAllocInSpace(false, regionSize + regionSize_)) {
            return nullptr;
        }
    }

    if (!spaces_->CanAllocInSpace(IsYoungRegionFlag(edenOrOldOrNonmovable), regionSize)) {
        return nullptr;
    }

    // 1. Get a region from the pre-allocated region block (e.g. a big mmapped contiguous space)
    void *region = nullptr;
    if (block_.GetFreeRegionsNum() > 0) {
        region = (regionSize <= regionSize_) ? block_.AllocRegion() : block_.AllocLargeRegion(regionSize);
    }
    if (region != nullptr) {
        IsYoungRegionFlag(edenOrOldOrNonmovable) ? spaces_->IncreaseYoungOccupiedInSharedPool(regionSize)
                                                 : spaces_->IncreaseTenuredOccupiedInSharedPool(regionSize);
    } else if (extend_) {  // 2. mmap a region directly; this is more flexible for memory usage
        region =
            IsYoungRegionFlag(edenOrOldOrNonmovable)
                ? spaces_->TryAllocPoolForYoung(regionSize, spaceType, allocatorType, this).GetMem()
                : spaces_->TryAllocPoolForTenured(regionSize, spaceType, allocatorType, this, allocPolicy).GetMem();
    }

    if (UNLIKELY(region == nullptr)) {
        return nullptr;
    }
    return NewRegion(region, space, regionSize, edenOrOldOrNonmovable, properties);
}

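// Constructs the Region header in already-obtained memory and sets up its auxiliary structures.
// Note that young (eden) regions are created without a live bitmap.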
Region *RegionPool::NewRegion(void *region, RegionSpace *space, size_t regionSize, RegionFlag edenOrOldOrNonmovable,
                              RegionFlag properties)
{
    ASSERT(Region::IsAlignment(ToUintPtr(region), regionSize_));

    ASAN_UNPOISON_MEMORY_REGION(region, Region::HeadSize());
    auto *ret = new (region) Region(space, ToUintPtr(region) + Region::HeadSize(), ToUintPtr(region) + regionSize);
    // NOTE(dtrubenkov): remove this fast fixup
    TSAN_ANNOTATE_IGNORE_WRITES_BEGIN();
    ret->AddFlag(edenOrOldOrNonmovable);
    ret->AddFlag(properties);
    ret->CreateRemSet();
    ret->SetupAtomics();
    ret->CreateMarkBitmap();
    if (!IsYoungRegionFlag(edenOrOldOrNonmovable)) {
        ret->CreateLiveBitmap();
    }
    TSAN_ANNOTATE_IGNORE_WRITES_END();
    return ret;
}

void RegionPool::PromoteYoungRegion(Region *region)
{
    ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
    if (block_.IsAddrInRange(region)) {
        spaces_->ReduceYoungOccupiedInSharedPool(region->Size());
        spaces_->IncreaseTenuredOccupiedInSharedPool(region->Size());
    } else {
        spaces_->PromoteYoungPool(region->Size());
    }
    // Change region type
    region->AddFlag(RegionFlag::IS_PROMOTED);
    region->RmvFlag(RegionFlag::IS_EDEN);
    region->AddFlag(RegionFlag::IS_OLD);
}

bool RegionPool::HaveTenuredSize(size_t size) const
{
    return spaces_->CanAllocInSpace(GenerationalSpaces::IS_TENURED_SPACE, size);
}

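// Checks whether numRegions regions of regionSize bytes can be obtained: regions still free in
// the block count first, the remainder must be available as pools from the mmap memory pool.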
bool RegionPool::HaveFreeRegions(size_t numRegions, size_t regionSize) const
{
    if (block_.GetFreeRegionsNum() >= numRegions) {
        return true;
    }
    numRegions -= block_.GetFreeRegionsNum();
    return PoolManager::GetMmapMemPool()->HaveEnoughPoolsInObjectSpace(numRegions, regionSize);
}

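// Allocates a region for this space, preferring to reuse an empty region of the matching
// generation before asking the pool for a new one. Young allocation is additionally capped by
// desiredEdenLength_.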
Region *RegionSpace::NewRegion(size_t regionSize, RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                               OSPagesAllocPolicy allocPolicy)
{
    Region *region = nullptr;
    auto youngRegionFlag = IsYoungRegionFlag(edenOrOldOrNonmovable);
    // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
    // on other reads or writes
    if (youngRegionFlag && youngRegionsInUse_.load(std::memory_order_relaxed) > desiredEdenLength_) {
        return nullptr;
    }
    if (youngRegionFlag && (!emptyYoungRegions_.empty())) {
        region = GetRegionFromEmptyList(emptyYoungRegions_);
        ASAN_UNPOISON_MEMORY_REGION(region, Region::HeadSize());
        ASSERT(regionSize == region->Size());
        regionPool_->NewRegion(region, this, regionSize, edenOrOldOrNonmovable, properties);
    } else if (!youngRegionFlag && (!emptyTenuredRegions_.empty())) {
        region = GetRegionFromEmptyList(emptyTenuredRegions_);
        ASAN_UNPOISON_MEMORY_REGION(region, Region::HeadSize());
        ASSERT(regionSize == region->Size());
        regionPool_->NewRegion(region, this, regionSize, edenOrOldOrNonmovable, properties);
    } else {
        region = regionPool_->NewRegion(this, spaceType_, allocatorType_, regionSize, edenOrOldOrNonmovable, properties,
                                        allocPolicy);
    }
    if (UNLIKELY(region == nullptr)) {
        return nullptr;
    }
    ASAN_POISON_MEMORY_REGION(ToVoidPtr(region->Begin()), region->End() - region->Begin());
    regions_.push_back(region->AsListNode());
    if (youngRegionFlag) {
        // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
        // on other reads or writes
        youngRegionsInUse_.fetch_add(1, std::memory_order_relaxed);
    }
    return region;
}

void RegionSpace::PromoteYoungRegion(Region *region)
{
    ASSERT(region->GetSpace() == this);
    ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
    regionPool_->PromoteYoungRegion(region);
    // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
    // on other reads or writes
    [[maybe_unused]] auto previousRegionsInUse = youngRegionsInUse_.fetch_sub(1, std::memory_order_relaxed);
    ASSERT(previousRegionsInUse > 0);
}

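// Frees every region of the space and immediately returns the pages of the resulting empty
// regions to the OS.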
void RegionSpace::FreeAllRegions()
{
    // Free all regions, then release the remaining empty ones
    IterateRegions([this](Region *region) { FreeRegion(region); });
    ReleaseEmptyRegions<RegionFlag::IS_EDEN, OSPagesPolicy::IMMEDIATE_RETURN>();
    ReleaseEmptyRegions<RegionFlag::IS_OLD, OSPagesPolicy::IMMEDIATE_RETURN>();
}

Region *RegionSpace::GetRegionFromEmptyList(DList &regionList)
{
    Region *region = Region::AsRegion(&(*regionList.begin()));
    regionList.erase(regionList.begin());
    ASSERT(region != nullptr);
    return region;
}

}  // namespace panda::mem