/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef RUNTIME_MEM_PANDA_BUMP_ALLOCATOR_INL_H
#define RUNTIME_MEM_PANDA_BUMP_ALLOCATOR_INL_H

#include "libpandabase/utils/logger.h"
#include "runtime/include/mem/allocator.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/alloc_config.h"

namespace ark::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_BUMP_ALLOCATOR(level) LOG(level, ALLOC) << "BumpPointerAllocator: "

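// Construct a bump-pointer allocator on top of the given pool. The whole pool
// becomes a single arena; when USE_TLABS is set, up to tlabsMaxCount TLABs can
// later be carved from the arena's tail via CreateNewTLAB().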
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::BumpPointerAllocator(Pool pool, SpaceType typeAllocation,
                                                                                 MemStatsType *memStats,
                                                                                 size_t tlabsMaxCount)
    : arena_(pool.GetSize(), pool.GetMem()),
      tlabManager_(tlabsMaxCount),
      typeAllocation_(typeAllocation),
      memStats_(memStats)
{
    LOG_BUMP_ALLOCATOR(DEBUG) << "Initializing BumpPointerAllocator";
    AllocConfigT::InitializeCrossingMapForMemory(pool.GetMem(), arena_.GetSize());
    LOG_BUMP_ALLOCATOR(DEBUG) << "Initializing BumpPointerAllocator finished";
    ASSERT(USE_TLABS ? tlabsMaxCount > 0 : tlabsMaxCount == 0);
}

template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::~BumpPointerAllocator()
{
    LOG_BUMP_ALLOCATOR(DEBUG) << "Destroying BumpPointerAllocator";
    LOG_BUMP_ALLOCATOR(DEBUG) << "Destroying BumpPointerAllocator finished";
}

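// Return the allocator to its initial state: drop everything allocated in the
// arena and, in the TLAB configuration, all TLAB bookkeeping. The crossing map
// is re-initialized only if something was actually allocated.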
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::Reset()
{
    // Remove the crossing map and create it again to avoid an extra check in the Alloc method
    if (LIKELY(arena_.GetOccupiedSize() > 0)) {
        AllocConfigT::RemoveCrossingMapForMemory(arena_.GetMem(), arena_.GetSize());
        AllocConfigT::InitializeCrossingMapForMemory(arena_.GetMem(), arena_.GetSize());
    }
    arena_.Reset();
    if constexpr (USE_TLABS) {
        tlabManager_.Reset();
    }
}

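// Grow the arena with a memory chunk adjacent to its current end. Expansion is
// only supported in the non-TLAB configuration, because TLABs are carved from
// the arena's end and would overlap the newly added range.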
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::ExpandMemory(void *mem, size_t size)
{
    LOG_BUMP_ALLOCATOR(DEBUG) << "Expand memory: Add " << std::dec << size << " bytes of memory at addr " << std::hex
                              << mem;
    ASSERT(ToUintPtr(arena_.GetArenaEnd()) == ToUintPtr(mem));
    if constexpr (USE_TLABS) {
        UNREACHABLE();
    }
    arena_.ExpandArena(mem, size);
    AllocConfigT::InitializeCrossingMapForMemory(mem, size);
}

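// Bump-pointer allocation from the arena under the allocator lock. Only
// DEFAULT_ALIGNMENT requests are supported; the size is aligned up to
// DEFAULT_ALIGNMENT_IN_BYTES. In the TLAB configuration, the bytes reserved
// for TLABs at the arena's end are excluded from the free space first.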
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
void *BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::Alloc(size_t size, Alignment alignment)
{
    os::memory::LockHolder lock(allocatorLock_);
    LOG_BUMP_ALLOCATOR(DEBUG) << "Try to allocate " << std::dec << size << " bytes of memory";
    ASSERT(alignment == DEFAULT_ALIGNMENT);
    // We need to align the size up here to record the correct used memory size inside MemStats
    // (each element allocated via the bump-pointer allocator has DEFAULT_ALIGNMENT alignment).
    size = AlignUp(size, DEFAULT_ALIGNMENT_IN_BYTES);
    void *mem = nullptr;
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (!USE_TLABS) {
        // Common scenario: bump-allocate directly from the arena
        mem = arena_.Alloc(size, alignment);
        // NOLINTNEXTLINE(readability-misleading-indentation)
    } else {
        // We must take the memory occupied by TLABs into account.
        ASSERT(arena_.GetFreeSize() >= tlabManager_.GetTLABsOccupiedSize());
        if (arena_.GetFreeSize() - tlabManager_.GetTLABsOccupiedSize() >= size) {
            mem = arena_.Alloc(size, alignment);
        }
    }
    if (mem == nullptr) {
        LOG_BUMP_ALLOCATOR(DEBUG) << "Couldn't allocate memory";
        return nullptr;
    }
    AllocConfigT::OnAlloc(size, typeAllocation_, memStats_);
    AllocConfigT::AddToCrossingMap(mem, size);
    AllocConfigT::MemoryInit(mem);
    return mem;
}

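// Carve a new TLAB buffer of `size` bytes from the end of the arena. TLABs
// grow downwards from the arena's end while regular Alloc() calls bump upwards
// from its start; allocation fails when the two would meet.
//
// A minimal usage sketch (the pool/config names and DEFAULT_TLAB_SIZE are
// hypothetical, for illustration only):
//
//   BumpPointerAllocator<MyAllocConfig, MyLockConfig, true> allocator(
//       pool, SpaceType::SPACE_TYPE_OBJECT, memStats, MAX_TLABS_COUNT);
//   TLAB *tlab = allocator.CreateNewTLAB(DEFAULT_TLAB_SIZE);
//   if (tlab != nullptr) {
//       void *obj = tlab->Alloc(objectSize);  // thread-local fast path
//   }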
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
TLAB *BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::CreateNewTLAB(size_t size)
{
    os::memory::LockHolder lock(allocatorLock_);
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (!USE_TLABS) {
        UNREACHABLE();
    }
    LOG_BUMP_ALLOCATOR(DEBUG) << "Try to create a TLAB with size " << std::dec << size;
    ASSERT(size == AlignUp(size, DEFAULT_ALIGNMENT_IN_BYTES));
    TLAB *tlab = nullptr;
    ASSERT(arena_.GetFreeSize() >= tlabManager_.GetTLABsOccupiedSize());
    if (arena_.GetFreeSize() - tlabManager_.GetTLABsOccupiedSize() >= size) {
        tlab = tlabManager_.GetUnusedTLABInstance();
        if (tlab != nullptr) {
            tlabManager_.IncreaseTLABsOccupiedSize(size);
            uintptr_t endOfArena = ToUintPtr(arena_.GetArenaEnd());
            ASSERT(endOfArena >= tlabManager_.GetTLABsOccupiedSize());
            void *tlabBufferStart = ToVoidPtr(endOfArena - tlabManager_.GetTLABsOccupiedSize());
            tlab->Fill(tlabBufferStart, size);
            LOG_BUMP_ALLOCATOR(DEBUG) << "Created new TLAB with size " << std::dec << size << " at addr " << std::hex
                                      << tlabBufferStart;
        } else {
            LOG_BUMP_ALLOCATOR(DEBUG) << "Reached the limit of TLABs inside the allocator";
        }
    } else {
        LOG_BUMP_ALLOCATOR(DEBUG) << "Don't have enough memory for a new TLAB with size " << std::dec << size;
    }
    return tlab;
}

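// Hand the whole underlying pool back to the caller: the arena's crossing map
// entry is removed, then memVisitor is invoked on the full memory range (it is
// expected to return the memory, e.g. to the pool manager).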
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::VisitAndRemoveAllPools(const MemVisitor &memVisitor)
{
    os::memory::LockHolder lock(allocatorLock_);
    AllocConfigT::RemoveCrossingMapForMemory(arena_.GetMem(), arena_.GetSize());
    memVisitor(arena_.GetMem(), arena_.GetSize());
}

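// A bump-pointer allocator owns exactly one pool and never releases parts of
// it while alive, so there are no free pools to visit.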
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
{
    (void)memVisitor;
    os::memory::LockHolder lock(allocatorLock_);
    // Nothing to do here
}

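// Visit every object in the allocator: first the objects allocated linearly in
// the arena (walked by object size, aligned to DEFAULT_ALIGNMENT_IN_BYTES),
// then, in the TLAB configuration, the objects inside every TLAB.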
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::IterateOverObjects(
    const std::function<void(ObjectHeader *objectHeader)> &objectVisitor)
{
    os::memory::LockHolder lock(allocatorLock_);
    LOG_BUMP_ALLOCATOR(DEBUG) << "Iteration over objects started";
    void *curPtr = arena_.GetAllocatedStart();
    void *endPtr = arena_.GetAllocatedEnd();
    while (curPtr < endPtr) {
        auto objectHeader = static_cast<ObjectHeader *>(curPtr);
        size_t objectSize = GetObjectSize(curPtr);
        objectVisitor(objectHeader);
        curPtr = ToVoidPtr(AlignUp(ToUintPtr(curPtr) + objectSize, DEFAULT_ALIGNMENT_IN_BYTES));
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (USE_TLABS) {
        LOG_BUMP_ALLOCATOR(DEBUG) << "Iterate over TLABs";
        // Iterate over objects in TLABs:
        tlabManager_.IterateOverTLABs([&objectVisitor](TLAB *tlab) {
            tlab->IterateOverObjects(objectVisitor);
            return true;
        });
        LOG_BUMP_ALLOCATOR(DEBUG) << "Iterate over TLABs finished";
    }
    LOG_BUMP_ALLOCATOR(DEBUG) << "Iteration over objects finished";
}

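// Visit the objects that fall into [leftBorder, rightBorder]. The asserts
// below require the range to be exactly one crossing-map granule: aligned to
// and no larger than CrossingMapSingleton's granularity.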
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
template <typename MemVisitor>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::IterateOverObjectsInRange(const MemVisitor &memVisitor,
                                                                                           void *leftBorder,
                                                                                           void *rightBorder)
{
    ASSERT(ToUintPtr(rightBorder) >= ToUintPtr(leftBorder));
    // NOTE(ipetrov): These are temporary asserts because we can't do anything
    // if the range crosses different allocators' memory pools
    ASSERT(ToUintPtr(rightBorder) - ToUintPtr(leftBorder) == (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(rightBorder) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(leftBorder) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));

    os::memory::LockHolder lock(allocatorLock_);
    LOG_BUMP_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange for range [" << std::hex << leftBorder << ", "
                              << rightBorder << "]";
    MemRange inputMemRange(ToUintPtr(leftBorder), ToUintPtr(rightBorder));
    MemRange arenaOccupiedMemRange(0U, 0U);
    if (arena_.GetOccupiedSize() > 0) {
        arenaOccupiedMemRange =
            MemRange(ToUintPtr(arena_.GetAllocatedStart()), ToUintPtr(arena_.GetAllocatedEnd()) - 1);
    }
    // Iterate over the objects in the intersection of the memory range occupied
    // via arena_.Alloc() and the input range
    if (arenaOccupiedMemRange.IsIntersect(inputMemRange)) {
        void *startPtr = ToVoidPtr(std::max(inputMemRange.GetStartAddress(), arenaOccupiedMemRange.GetStartAddress()));
        void *endPtr = ToVoidPtr(std::min(inputMemRange.GetEndAddress(), arenaOccupiedMemRange.GetEndAddress()));

        void *objAddr = AllocConfigT::FindFirstObjInCrossingMap(startPtr, endPtr);
        if (objAddr != nullptr) {
            ASSERT(arenaOccupiedMemRange.GetStartAddress() <= ToUintPtr(objAddr) &&
                   ToUintPtr(objAddr) <= arenaOccupiedMemRange.GetEndAddress());
            void *currentPtr = objAddr;
            while (currentPtr < endPtr) {
                auto *objectHeader = static_cast<ObjectHeader *>(currentPtr);
                size_t objectSize = GetObjectSize(currentPtr);
                memVisitor(objectHeader);
                currentPtr = ToVoidPtr(AlignUp(ToUintPtr(currentPtr) + objectSize, DEFAULT_ALIGNMENT_IN_BYTES));
            }
        }
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (USE_TLABS) {
        // If we didn't allocate any TLABs, there is no need to iterate over them
        if (tlabManager_.GetTLABsOccupiedSize() == 0) {
            return;
        }
        uintptr_t endOfArena = ToUintPtr(arena_.GetArenaEnd());
        uintptr_t startTlab = endOfArena - tlabManager_.GetTLABsOccupiedSize();
        MemRange tlabsMemRange(startTlab, endOfArena - 1);
        // Iterate over the objects in the intersection of the TLABs' memory range
        // and the input range
        if (tlabsMemRange.IsIntersect(inputMemRange)) {
            void *startPtr = ToVoidPtr(std::max(inputMemRange.GetStartAddress(), tlabsMemRange.GetStartAddress()));
            void *endPtr = ToVoidPtr(std::min(inputMemRange.GetEndAddress(), tlabsMemRange.GetEndAddress()));
            tlabManager_.IterateOverTLABs(
                [&memVisitor, memRange = MemRange(ToUintPtr(startPtr), ToUintPtr(endPtr))](TLAB *tlab) -> bool {
                    tlab->IterateOverObjectsInRange(memVisitor, memRange);
                    return true;
                });
        }
    }
}

template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
MemRange BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::GetMemRange()
{
    return MemRange(ToUintPtr(arena_.GetAllocatedStart()), ToUintPtr(arena_.GetArenaEnd()) - 1);
}

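// A single pass used by compacting GC: every object reported ALIVE by
// deathChecker is forwarded to objectMoveVisitor; dead objects are skipped
// (the arena is expected to be reset by the caller afterwards).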
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
template <typename ObjectMoveVisitorT>
void BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::CollectAndMove(
    const GCObjectVisitor &deathChecker, const ObjectMoveVisitorT &objectMoveVisitor)
{
    IterateOverObjects([&](ObjectHeader *objectHeader) {
        // We are only interested in moving alive objects; the arena is cleaned up afterwards
        if (deathChecker(objectHeader) == ObjectStatus::ALIVE_OBJECT) {
            objectMoveVisitor(objectHeader);
        }
    });
}

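// Check whether obj lies inside this allocator's memory: first in the arena's
// bump-allocated part, then (in the TLAB configuration) in any of the TLABs.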
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
bool BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::ContainObject(const ObjectHeader *obj)
{
    bool result = arena_.InArena(const_cast<ObjectHeader *>(obj));
    if (USE_TLABS && !result) {
        // Check TLABs
        tlabManager_.IterateOverTLABs([&](TLAB *tlab) {
            result = tlab->ContainObject(obj);
            return !result;
        });
    }
    return result;
}

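// An object is considered live if walking from the first object in obj's
// crossing-map granule lands exactly on obj (arena case), or if iterating over
// the TLAB that contains obj visits it (TLAB case).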
template <typename AllocConfigT, typename LockConfigT, bool USE_TLABS>
bool BumpPointerAllocator<AllocConfigT, LockConfigT, USE_TLABS>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj));
    void *objMem = static_cast<void *>(const_cast<ObjectHeader *>(obj));
    if (arena_.InArena(objMem)) {
        void *currentObj = AllocConfigT::FindFirstObjInCrossingMap(objMem, objMem);
        if (UNLIKELY(currentObj == nullptr)) {
            return false;
        }
        while (currentObj < objMem) {
            size_t objectSize = GetObjectSize(currentObj);
            currentObj = ToVoidPtr(AlignUp(ToUintPtr(currentObj) + objectSize, DEFAULT_ALIGNMENT_IN_BYTES));
        }
        return currentObj == objMem;
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (USE_TLABS) {
        bool result = false;
        auto objectVisitor = [&result, obj](ObjectHeader *objectHeader) {
            if (objectHeader == obj) {
                result = true;
            }
        };
        auto tlabVisitor = [&objectVisitor, obj](TLAB *tlab) {
            if (tlab->ContainObject(obj)) {
                tlab->IterateOverObjects(objectVisitor);
                return false;
            }
            return true;
        };
        tlabManager_.IterateOverTLABs(tlabVisitor);
        return result;
    }
    return false;
}


#undef LOG_BUMP_ALLOCATOR

}  // namespace ark::mem

#endif  // RUNTIME_MEM_PANDA_BUMP_ALLOCATOR_INL_H