/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_SPACE_INL_H
#define PANDA_RUNTIME_MEM_REGION_SPACE_INL_H

#include "runtime/mem/region_space.h"
#include "libpandabase/utils/asan_interface.h"

namespace ark::mem {

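// Debug-only RAII guard: marks the region as being in the allocating state for
// the guard's lifetime. The SetAllocating() calls live inside ASSERT, so in
// release builds the guard compiles to nothing (hence region_ is FIELD_UNUSED).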
class RegionAllocCheck {
public:
    explicit RegionAllocCheck(Region *region) : region_(region)
    {
        ASSERT(region_->SetAllocating(true));
    }
    ~RegionAllocCheck()
    {
        ASSERT(region_->SetAllocating(false));
    }
    NO_COPY_SEMANTIC(RegionAllocCheck);
    NO_MOVE_SEMANTIC(RegionAllocCheck);

private:
    Region *region_ FIELD_UNUSED;
};

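// Debug-only RAII guard: marks the region as being iterated over for the
// guard's lifetime, so that concurrent allocation can be caught by assertions.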
class RegionIterateCheck {
public:
    explicit RegionIterateCheck(Region *region) : region_(region)
    {
        ASSERT(region_->SetIterating(true));
    }
    ~RegionIterateCheck()
    {
        ASSERT(region_->SetIterating(false));
    }
    NO_COPY_SEMANTIC(RegionIterateCheck);
    NO_MOVE_SEMANTIC(RegionIterateCheck);

private:
    Region *region_ FIELD_UNUSED;
};

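// Bump-pointer allocation of alignedSize bytes from the region's top.
// alignedSize must already be aligned to DEFAULT_ALIGNMENT_IN_BYTES.
// When ATOMIC is true the top pointer is advanced with a relaxed CAS loop, so
// several threads may allocate from the region concurrently; otherwise plain
// reads and writes are used. Returns nullptr if the region is out of space.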
template <bool ATOMIC>
void *Region::Alloc(size_t alignedSize)
{
    RegionAllocCheck alloc(this);
    ASSERT(AlignUp(alignedSize, DEFAULT_ALIGNMENT_IN_BYTES) == alignedSize);
    ASSERT(!IsTLAB() || IsMixedTLAB());
    uintptr_t oldTop;
    uintptr_t newTop;
    if (ATOMIC) {
        auto atomicTop = reinterpret_cast<std::atomic<uintptr_t> *>(&top_);
        // Atomic with relaxed order reason: data race with top_ with no synchronization or ordering constraints
        // imposed on other reads or writes
        oldTop = atomicTop->load(std::memory_order_relaxed);
        do {
            newTop = oldTop + alignedSize;
            if (UNLIKELY(newTop > end_)) {
                return nullptr;
            }
        } while (!atomicTop->compare_exchange_weak(oldTop, newTop, std::memory_order_relaxed));
        ASAN_UNPOISON_MEMORY_REGION(ToVoidPtr(oldTop), alignedSize);
        return ToVoidPtr(oldTop);
    }
    newTop = top_ + alignedSize;
    if (UNLIKELY(newTop > end_)) {
        return nullptr;
    }
    oldTop = top_;
    top_ = newTop;

    ASAN_UNPOISON_MEMORY_REGION(ToVoidPtr(oldTop), alignedSize);
    return ToVoidPtr(oldTop);
}

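// Rolls the bump pointer back to addr, discarding everything allocated at or
// above it. Intended to undo the most recent Alloc() call; the caller must
// ensure no concurrent allocation is in flight.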
inline void Region::UndoAlloc(void *addr)
{
    RegionAllocCheck alloc(this);
    top_ = ToUintPtr(addr);
}

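// Calls visitor for every object in the region. For a plain region this walks
// the range [Begin(), Top()) object by object; for a TLAB region it delegates
// to each TLAB and, for mixed TLAB regions, additionally walks the tail
// allocated past the last TLAB.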
template <typename ObjectVisitor>
void Region::IterateOverObjects(const ObjectVisitor &visitor)
{
    // This method doesn't work for nonmovable regions
    ASSERT(!HasFlag(RegionFlag::IS_NONMOVABLE));
    // Currently used only during the GC STW phase, so check that the region is not in the allocating state
    RegionIterateCheck iterate(this);
    if (!IsTLAB()) {
        auto curPtr = Begin();
        auto endPtr = Top();
        while (curPtr < endPtr) {
            auto objectHeader = reinterpret_cast<ObjectHeader *>(curPtr);
            size_t objectSize = GetObjectSize(objectHeader);
            visitor(objectHeader);
            curPtr = AlignUp(curPtr + objectSize, DEFAULT_ALIGNMENT_IN_BYTES);
        }
    } else {
        for (auto i : *tlabVector_) {
            i->IterateOverObjects(visitor);
        }
        if (IsMixedTLAB()) {
            auto curPtr = ToUintPtr(GetLastTLAB()->GetEndAddr());
            auto endPtr = Top();
            while (curPtr < endPtr) {
                auto *objectHeader = reinterpret_cast<ObjectHeader *>(curPtr);
                size_t objectSize = GetObjectSize(objectHeader);
                visitor(objectHeader);
                curPtr = AlignUp(curPtr + objectSize, DEFAULT_ALIGNMENT_IN_BYTES);
            }
        }
    }
}

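// Returns a region's memory either to the pool's internal block (if the
// region was carved out of it) or to the underlying memory spaces, updating
// the young/tenured occupancy accounting accordingly. With the
// IMMEDIATE_RETURN policy the backing pages are released to the OS right away.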
template <OSPagesPolicy OS_PAGES_POLICY>
void RegionPool::FreeRegion(Region *region)
{
    bool releasePages = OS_PAGES_POLICY == OSPagesPolicy::IMMEDIATE_RETURN;
    if (block_.IsAddrInRange(region)) {
        region->IsYoung() ? spaces_->ReduceYoungOccupiedInSharedPool(region->Size())
                          : spaces_->ReduceTenuredOccupiedInSharedPool(region->Size());
        block_.FreeRegion(region, releasePages);
    } else {
        region->IsYoung() ? spaces_->FreeYoungPool(region, region->Size(), releasePages)
                          : spaces_->FreeTenuredPool(region, region->Size(), releasePages);
    }
}

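// Removes a region from this space: poisons its memory for ASAN, unlinks it
// from the regions list, updates the young-regions-in-use counter and destroys
// the region. With the NoRelease policy, empty young regions (and empty
// tenured regions up to emptyTenuredRegionsMaxCount_) are cached for reuse
// instead of being returned to the pool.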
template <RegionSpace::ReleaseRegionsPolicy REGIONS_RELEASE_POLICY, OSPagesPolicy OS_PAGES_POLICY>
void RegionSpace::FreeRegion(Region *region)
{
    ASSERT(region->GetSpace() == this);
    ASAN_POISON_MEMORY_REGION(ToVoidPtr(region->Begin()), region->End() - region->Begin());
    regions_.erase(region->AsListNode());
    if (region->IsYoung()) {
        // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
        // on other reads or writes
        [[maybe_unused]] auto previousRegionsInUse = youngRegionsInUse_.fetch_sub(1, std::memory_order_relaxed);
        ASSERT(previousRegionsInUse > 0);
    }
    region->Destroy();
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (REGIONS_RELEASE_POLICY == ReleaseRegionsPolicy::NoRelease) {
        if (region->IsYoung()) {
            // the list of empty young regions is not limited in size
            emptyYoungRegions_.push_back(region->AsListNode());
            return;
        }
        if (region->HasFlag(RegionFlag::IS_OLD) && (emptyTenuredRegions_.size() < emptyTenuredRegionsMaxCount_)) {
            emptyTenuredRegions_.push_back(region->AsListNode());
            return;
        }
    }
    regionPool_->FreeRegion<OS_PAGES_POLICY>(region);
}

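// Returns all cached empty regions of the given type (young or tenured) to
// the region pool and clears the corresponding list.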
template <RegionFlag REGION_TYPE, OSPagesPolicy OS_PAGES_POLICY>
void RegionSpace::ReleaseEmptyRegions()
{
    auto visitor = [this](Region *region) { regionPool_->FreeRegion<OS_PAGES_POLICY>(region); };
    if (IsYoungRegionFlag(REGION_TYPE)) {
        IterateRegionsList(emptyYoungRegions_, visitor);
        emptyYoungRegions_.clear();
    } else {
        IterateRegionsList(emptyTenuredRegions_, visitor);
        emptyTenuredRegions_.clear();
    }
}

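// Applies visitor to every region currently owned by this space.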
template <typename RegionVisitor>
void RegionSpace::IterateRegions(RegionVisitor visitor)
{
    IterateRegionsList(regions_, visitor);
}

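// Applies visitor to every region in regionsList. The iterator is advanced
// before the visitor runs, so the visitor may safely unlink the current
// region from the list.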
template <typename RegionVisitor>
void RegionSpace::IterateRegionsList(DList &regionsList, RegionVisitor visitor)
{
    auto it = regionsList.begin();
    while (it != regionsList.end()) {
        auto *region = Region::AsRegion(&(*it));
        ++it;  // advance before calling the visitor, which may remove the region
        visitor(region);
    }
}

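// Returns true if the object address belongs to some region of this space.
// CROSS_REGION selects the region-lookup variant that tolerates objects
// which may cross region boundaries.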
template <bool CROSS_REGION>
bool RegionSpace::ContainObject(const ObjectHeader *object) const
{
    return GetRegion<CROSS_REGION>(object) != nullptr;
}

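// Returns true if the object belongs to a region of this space and its
// address lies within that region's allocated range.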
template <bool CROSS_REGION>
bool RegionSpace::IsLive(const ObjectHeader *object) const
{
    auto *region = GetRegion<CROSS_REGION>(object);

    // Check that the object lies within the region's allocated range
    return region != nullptr && region->IsInAllocRange(object);
}

}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_REGION_SPACE_INL_H