/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "mem/mem_pool.h"
#include "runtime/mem/region_space-inl.h"
#include "runtime/mem/rem_set-inl.h"
#include "runtime/include/runtime.h"
#include "runtime/include/panda_vm.h"

namespace ark::mem {

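// Number of bytes actually allocated in the region: for a plain (non-TLAB) or mixed-TLAB region this is the
// bump-pointer distance from the region begin to its top; for a pure TLAB region it is the sum of the occupied
// sizes of all TLABs carved from it.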
uint32_t Region::GetAllocatedBytes() const
{
    if (!IsTLAB() || IsMixedTLAB()) {
        return top_ - begin_;
    }
    uint32_t allocatedBytes = 0;
    ASSERT(tlabVector_ != nullptr);
    for (auto i : *tlabVector_) {
        allocatedBytes += i->GetOccupiedSize();
    }
    return allocatedBytes;
}

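// Fraction of the region that is not occupied by allocated bytes, in the range [0.0, 1.0].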
double Region::GetFragmentation() const
{
    ASSERT(Size() >= GetAllocatedBytes());
    return static_cast<double>(Size() - GetAllocatedBytes()) / Size();
}

bool Region::IsInRange(const ObjectHeader *object) const
{
    return ToUintPtr(object) >= begin_ && ToUintPtr(object) < end_;
}

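// Unlike IsInRange, this checks only the allocated part of the region: the [begin, top) bump-pointer range for
// non-TLAB regions, the individual TLABs otherwise, and additionally the tail between the last TLAB and the top
// for mixed-TLAB regions.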
bool Region::IsInAllocRange(const ObjectHeader *object) const
{
    bool inRange = false;
    if (!IsTLAB()) {
        inRange = (ToUintPtr(object) >= begin_ && ToUintPtr(object) < top_);
    } else {
        for (auto i : *tlabVector_) {
            inRange = i->ContainObject(object);
            if (inRange) {
                break;
            }
        }
        if (IsMixedTLAB() && !inRange) {
            inRange = (ToUintPtr(object) >= ToUintPtr(tlabVector_->back()->GetEndAddr()) && ToUintPtr(object) < top_);
        }
    }
    return inRange;
}

InternalAllocatorPtr Region::GetInternalAllocator()
{
    return space_->GetPool()->GetInternalAllocator();
}

void Region::CreateRemSet()
{
    ASSERT(remSet_ == nullptr);
    remSet_ = GetInternalAllocator()->New<RemSetT>();
}

void Region::SetupAtomics()
{
    liveBytes_ = GetInternalAllocator()->New<std::atomic<uint32_t>>();
    pinnedObjects_ = GetInternalAllocator()->New<std::atomic<uint32_t>>();
}

void Region::CreateTLABSupport()
{
    ASSERT(tlabVector_ == nullptr);
    tlabVector_ = GetInternalAllocator()->New<PandaVector<TLAB *>>(GetInternalAllocator()->Adapter());
}

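// Bytes still available for new TLABs: the gap between the end of the last TLAB (or Top() if no TLAB has been
// created yet) and the end of the region.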
size_t Region::GetRemainingSizeForTLABs() const
{
    ASSERT(IsTLAB());
    ASSERT(!IsMixedTLAB());
    // TLABs are stored one by one.
    uintptr_t lastTlabEndByte = tlabVector_->empty() ? Top() : ToUintPtr(tlabVector_->back()->GetEndAddr());
    ASSERT((lastTlabEndByte <= End()) && (lastTlabEndByte >= Top()));
    return End() - lastTlabEndByte;
}

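// Carves a new TLAB of `size` bytes right after the previously created TLABs; returns nullptr if the remaining
// space in the region is not large enough.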
TLAB *Region::CreateTLAB(size_t size)
{
    ASSERT(IsTLAB());
    ASSERT(!IsMixedTLAB());
    ASSERT(Begin() != 0);
    ASSERT(Top() == Begin());
    size_t remainingSize = GetRemainingSizeForTLABs();
    if (remainingSize < size) {
        return nullptr;
    }
    ASSERT(End() > remainingSize);
    TLAB *tlab = GetInternalAllocator()->New<TLAB>(ToVoidPtr(End() - remainingSize), size);
    tlabVector_->push_back(tlab);
    return tlab;
}

MarkBitmap *Region::CreateMarkBitmap()
{
    ASSERT(markBitmap_ == nullptr);
    auto allocator = GetInternalAllocator();
    auto bitmapData = allocator->Alloc(MarkBitmap::GetBitMapSizeInByte(Size()));
    ASSERT(bitmapData != nullptr);
    markBitmap_ = allocator->New<MarkBitmap>(this, Size(), bitmapData);
    ASSERT(markBitmap_ != nullptr);
    markBitmap_->ClearAllBits();
    return markBitmap_;
}

MarkBitmap *Region::CreateLiveBitmap()
{
    ASSERT(liveBitmap_ == nullptr);
    auto allocator = GetInternalAllocator();
    auto bitmapData = allocator->Alloc(MarkBitmap::GetBitMapSizeInByte(Size()));
    ASSERT(bitmapData != nullptr);
    liveBitmap_ = allocator->New<MarkBitmap>(this, Size(), bitmapData);
    ASSERT(liveBitmap_ != nullptr);
    liveBitmap_->ClearAllBits();
    return liveBitmap_;
}

void Region::SwapMarkBitmap()
{
    ASSERT(liveBitmap_ != nullptr);
    ASSERT(markBitmap_ != nullptr);
    std::swap(liveBitmap_, markBitmap_);
}

void Region::CloneMarkBitmapToLiveBitmap()
{
    ASSERT(liveBitmap_ != nullptr);
    ASSERT(markBitmap_ != nullptr);
    markBitmap_->CopyTo(liveBitmap_);
}

void Region::SetMarkBit(ObjectHeader *object)
{
    ASSERT(IsInRange(object));
    markBitmap_->Set(object);
}

uint32_t Region::CalcLiveBytes() const
{
    ASSERT(liveBitmap_ != nullptr);
    uint32_t liveBytes = 0;
    liveBitmap_->IterateOverMarkedChunks<true>(
        [&liveBytes](const void *object) { liveBytes += GetAlignedObjectSize(GetObjectSize(object)); });
    return liveBytes;
}

uint32_t Region::CalcMarkBytes() const
{
    ASSERT(markBitmap_ != nullptr);
    uint32_t liveBytes = 0;
    markBitmap_->IterateOverMarkedChunks(
        [&liveBytes](const void *object) { liveBytes += GetAlignedObjectSize(GetObjectSize(object)); });
    return liveBytes;
}

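// Releases all per-region metadata allocated via the internal allocator: remembered set, atomic counters,
// TLAB vector, and both bitmaps together with their backing storage.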
void Region::Destroy()
{
    auto allocator = GetInternalAllocator();
    if (remSet_ != nullptr) {
        allocator->Delete(remSet_);
        remSet_ = nullptr;
    }
    if (liveBytes_ != nullptr) {
        allocator->Delete(liveBytes_);
        liveBytes_ = nullptr;
    }
    if (pinnedObjects_ != nullptr) {
        allocator->Delete(pinnedObjects_);
        pinnedObjects_ = nullptr;
    }
    if (tlabVector_ != nullptr) {
        for (auto i : *tlabVector_) {
            allocator->Delete(i);
        }
        allocator->Delete(tlabVector_);
        tlabVector_ = nullptr;
    }
    if (liveBitmap_ != nullptr) {
        allocator->Delete(liveBitmap_->GetBitMap().data());
        allocator->Delete(liveBitmap_);
        liveBitmap_ = nullptr;
    }
    if (markBitmap_ != nullptr) {
        allocator->Delete(markBitmap_->GetBitMap().data());
        allocator->Delete(markBitmap_);
        markBitmap_ = nullptr;
    }
}

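// Initializes the block that manages the contiguous range of regions [regionsBegin, regionsEnd): allocates and
// zeroes the `occupied_` table with one slot per region.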
void RegionBlock::Init(uintptr_t regionsBegin, uintptr_t regionsEnd)
{
    os::memory::LockHolder lock(lock_);
    ASSERT(occupied_.Empty());
    ASSERT(Region::IsAlignment(regionsBegin, regionSize_));
    ASSERT((regionsEnd - regionsBegin) % regionSize_ == 0);
    size_t numRegions = (regionsEnd - regionsBegin) / regionSize_;
    if (numRegions > 0) {
        size_t size = numRegions * sizeof(Region *);
        auto data = reinterpret_cast<Region **>(allocator_->Alloc(size));
        memset_s(data, size, 0, size);
        occupied_ = Span<Region *>(data, numRegions);
        regionsBegin_ = regionsBegin;
        regionsEnd_ = regionsEnd;
    }
}

Region *RegionBlock::AllocRegion()
{
    os::memory::LockHolder lock(lock_);
    // NOTE(yxr) : find an unused region, improve it
    for (size_t i = 0; i < occupied_.Size(); ++i) {
        if (occupied_[i] == nullptr) {
            auto *region = RegionAt(i);
            occupied_[i] = region;
            numUsedRegions_++;
            return region;
        }
    }
    return nullptr;
}

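// Looks for `largeRegionSize / regionSize_` contiguous free slots; every slot of the found run is marked with
// the head region pointer so that FreeRegion can release the whole run at once.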
Region *RegionBlock::AllocLargeRegion(size_t largeRegionSize)
{
    os::memory::LockHolder lock(lock_);
    // NOTE(yxr) : search for contiguous unused regions, improve it
    size_t allocRegionNum = largeRegionSize / regionSize_;
    size_t left = 0;
    while (left + allocRegionNum <= occupied_.Size()) {
        bool found = true;
        size_t right = left;
        while (right < left + allocRegionNum) {
            if (occupied_[right] != nullptr) {
                found = false;
                break;
            }
            ++right;
        }
        if (found) {
            // mark those regions as 'used'
            auto *region = RegionAt(left);
            for (size_t i = 0; i < allocRegionNum; i++) {
                occupied_[left + i] = region;
            }
            numUsedRegions_ += allocRegionNum;
            return region;
        }
        // next round
        left = right + 1;
    }
    return nullptr;
}

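// Clears the occupied slots covered by the region (one or several for a large region) and optionally returns
// the underlying pages to the OS.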
void RegionBlock::FreeRegion(Region *region, bool releasePages)
{
    os::memory::LockHolder lock(lock_);
    size_t regionIdx = RegionIndex(region);
    size_t regionNum = region->Size() / regionSize_;
    ASSERT(regionIdx + regionNum <= occupied_.Size());
    for (size_t i = 0; i < regionNum; i++) {
        ASSERT(occupied_[regionIdx + i] == region);
        occupied_[regionIdx + i] = nullptr;
    }
    numUsedRegions_ -= regionNum;
    if (releasePages) {
        os::mem::ReleasePages(ToUintPtr(region), region->End());
    }
}

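// Allocates a region either from the pre-allocated region block or, if the pool may be extended, by mmapping a
// new pool; returns nullptr when the space limits would be exceeded.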
Region *RegionPool::NewRegion(RegionSpace *space, SpaceType spaceType, AllocatorType allocatorType, size_t regionSize,
                              RegionFlag edenOrOldOrNonmovable, RegionFlag properties, OSPagesAllocPolicy allocPolicy)
{
    // Check that the input regionSize is aligned
    ASSERT(regionSize % regionSize_ == 0);
    ASSERT(IsYoungRegionFlag(edenOrOldOrNonmovable) || edenOrOldOrNonmovable == RegionFlag::IS_OLD ||
           edenOrOldOrNonmovable == RegionFlag::IS_NONMOVABLE);

    // Ensure we leave enough space so that there are always some free regions in the heap which we can use for full GC
    if (edenOrOldOrNonmovable == RegionFlag::IS_NONMOVABLE || regionSize > regionSize_) {
        if (!spaces_->CanAllocInSpace(false, regionSize + regionSize_)) {
            return nullptr;
        }
    }

    if (!spaces_->CanAllocInSpace(IsYoungRegionFlag(edenOrOldOrNonmovable), regionSize)) {
        return nullptr;
    }

    // 1. Get a region from the pre-allocated region block (e.g. a big mmapped contiguous space)
    void *region = nullptr;
    if (block_.GetFreeRegionsNum() > 0) {
        region = (regionSize <= regionSize_) ? block_.AllocRegion() : block_.AllocLargeRegion(regionSize);
    }
    if (region != nullptr) {
        IsYoungRegionFlag(edenOrOldOrNonmovable) ? spaces_->IncreaseYoungOccupiedInSharedPool(regionSize)
                                                 : spaces_->IncreaseTenuredOccupiedInSharedPool(regionSize);
    } else if (extend_) {  // 2. Mmap a region directly; this is more flexible for memory usage
        region =
            IsYoungRegionFlag(edenOrOldOrNonmovable)
                ? spaces_->TryAllocPoolForYoung(regionSize, spaceType, allocatorType, this).GetMem()
                : spaces_->TryAllocPoolForTenured(regionSize, spaceType, allocatorType, this, allocPolicy).GetMem();
    }

    if (UNLIKELY(region == nullptr)) {
        return nullptr;
    }
    return NewRegion(region, space, regionSize, edenOrOldOrNonmovable, properties);
}

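// Constructs a Region header in place at the start of the given memory and sets up its metadata: flags,
// remembered set, atomic counters, mark bitmap, and a live bitmap for non-young regions.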
Region *RegionPool::NewRegion(void *region, RegionSpace *space, size_t regionSize, RegionFlag edenOrOldOrNonmovable,
                              RegionFlag properties)
{
    ASSERT(Region::IsAlignment(ToUintPtr(region), regionSize_));

    ASAN_UNPOISON_MEMORY_REGION(region, Region::HeadSize());
    auto *ret = new (region) Region(space, ToUintPtr(region) + Region::HeadSize(), ToUintPtr(region) + regionSize);
    // NOTE(dtrubenkov): remove this fast fixup
    TSAN_ANNOTATE_IGNORE_WRITES_BEGIN();
    ret->AddFlag(edenOrOldOrNonmovable);
    ret->AddFlag(properties);
    ret->CreateRemSet();
    ret->SetupAtomics();
    ret->CreateMarkBitmap();
    if (!IsYoungRegionFlag(edenOrOldOrNonmovable)) {
        ret->CreateLiveBitmap();
    }
    TSAN_ANNOTATE_IGNORE_WRITES_END();
    return ret;
}

void RegionPool::PromoteYoungRegion(Region *region)
{
    ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
    if (block_.IsAddrInRange(region)) {
        spaces_->ReduceYoungOccupiedInSharedPool(region->Size());
        spaces_->IncreaseTenuredOccupiedInSharedPool(region->Size());
    } else {
        spaces_->PromoteYoungPool(region->Size());
    }
    // Change region type
    region->AddFlag(RegionFlag::IS_PROMOTED);
    region->RmvFlag(RegionFlag::IS_EDEN);
    region->AddFlag(RegionFlag::IS_OLD);
}

bool RegionPool::HaveTenuredSize(size_t size) const
{
    return spaces_->CanAllocInSpace(GenerationalSpaces::IS_TENURED_SPACE, size);
}

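// True if the request can be satisfied by the free slots of the pre-allocated block plus pools that can still
// be created in the object space.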
bool RegionPool::HaveFreeRegions(size_t numRegions, size_t regionSize) const
{
    if (block_.GetFreeRegionsNum() >= numRegions) {
        return true;
    }
    numRegions -= block_.GetFreeRegionsNum();
    return PoolManager::GetMmapMemPool()->HaveEnoughPoolsInObjectSpace(numRegions, regionSize);
}

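// Prefers reusing a region from the corresponding empty-region list (re-initializing it in place) and falls back
// to the region pool otherwise; for young regions the allocation is also bounded by desiredEdenLength_.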
Region *RegionSpace::NewRegion(size_t regionSize, RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                               OSPagesAllocPolicy allocPolicy)
{
    Region *region = nullptr;
    auto youngRegionFlag = IsYoungRegionFlag(edenOrOldOrNonmovable);
    // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
    // on other reads or writes
    if (youngRegionFlag && youngRegionsInUse_.load(std::memory_order_relaxed) >= desiredEdenLength_) {
        return nullptr;
    }
    if (youngRegionFlag && (!emptyYoungRegions_.empty())) {
        region = GetRegionFromEmptyList(emptyYoungRegions_);
        ASAN_UNPOISON_MEMORY_REGION(region, Region::HeadSize());
        ASSERT(regionSize == region->Size());
        regionPool_->NewRegion(region, this, regionSize, edenOrOldOrNonmovable, properties);
    } else if (!youngRegionFlag && (!emptyTenuredRegions_.empty())) {
        region = GetRegionFromEmptyList(emptyTenuredRegions_);
        ASAN_UNPOISON_MEMORY_REGION(region, Region::HeadSize());
        ASSERT(regionSize == region->Size());
        regionPool_->NewRegion(region, this, regionSize, edenOrOldOrNonmovable, properties);
    } else {
        region = regionPool_->NewRegion(this, spaceType_, allocatorType_, regionSize, edenOrOldOrNonmovable,
                                        properties, allocPolicy);
    }
    if (UNLIKELY(region == nullptr)) {
        return nullptr;
    }
    ASAN_POISON_MEMORY_REGION(ToVoidPtr(region->Begin()), region->End() - region->Begin());
    regions_.push_back(region->AsListNode());
    if (youngRegionFlag) {
        // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
        // on other reads or writes
        youngRegionsInUse_.fetch_add(1, std::memory_order_relaxed);
    }
    return region;
}

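// Promotes an eden region to the old generation in place (no object copying). If the region served TLAB
// allocations it becomes a mixed-TLAB region and its top is set to the end of the last TLAB before the flags
// are switched by the region pool.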
void RegionSpace::PromoteYoungRegion(Region *region)
{
    ASSERT(region->GetSpace() == this);
    ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
    if (region->IsTLAB()) {
        region->AddFlag(RegionFlag::IS_MIXEDTLAB);
        region->SetTop(ToUintPtr(region->GetLastTLAB()->GetEndAddr()));
    }
    regionPool_->PromoteYoungRegion(region);
    // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
    // on other reads or writes
    [[maybe_unused]] auto previousRegionsInUse = youngRegionsInUse_.fetch_sub(1, std::memory_order_relaxed);
    ASSERT(previousRegionsInUse > 0);
}

void RegionSpace::FreeAllRegions()
{
    // delete all regions
    IterateRegions([this](Region *region) { FreeRegion(region); });
    ReleaseEmptyRegions<RegionFlag::IS_EDEN, OSPagesPolicy::IMMEDIATE_RETURN>();
    ReleaseEmptyRegions<RegionFlag::IS_OLD, OSPagesPolicy::IMMEDIATE_RETURN>();
}

Region *RegionSpace::GetRegionFromEmptyList(DList &regionList)
{
    Region *region = Region::AsRegion(&(*regionList.begin()));
    regionList.erase(regionList.begin());
    ASSERT(region != nullptr);
    return region;
}

}  // namespace ark::mem