/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef RUNTIME_MEM_PANDA_BUMP_ALLOCATOR_INL_H
#define RUNTIME_MEM_PANDA_BUMP_ALLOCATOR_INL_H

#include "libpandabase/utils/logger.h"
#include "runtime/include/mem/allocator.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/alloc_config.h"

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_BUMP_ALLOCATOR(level) LOG(level, ALLOC) << "BumpPointerAllocator: "
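/**
 * BumpPointerAllocator serves allocations from one contiguous arena by bumping a
 * pointer; when UseTlabs is true, TLAB buffers are additionally carved from the
 * top end of the same arena (see CreateNewTLAB below). A minimal usage sketch,
 * assuming suitable AllocConfigT/LockConfigT instantiations and an already
 * acquired Pool (none of these names are prescribed by this file):
 *
 *     BumpPointerAllocator<AllocConfigT, LockConfigT, false> allocator(
 *         pool, SpaceType::SPACE_TYPE_OBJECT, mem_stats, 0);  // no TLABs => tlabs_max_count == 0
 *     void *mem = allocator.Alloc(object_size);               // DEFAULT_ALIGNMENT only
 */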
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::BumpPointerAllocator(Pool pool, SpaceType type_allocation,
                                                                                MemStatsType *mem_stats,
                                                                                size_t tlabs_max_count)
    : arena_(pool.GetSize(), pool.GetMem()),
      tlab_manager_(tlabs_max_count),
      type_allocation_(type_allocation),
      mem_stats_(mem_stats)
{
    LOG_BUMP_ALLOCATOR(DEBUG) << "Initializing BumpPointerAllocator";
    AllocConfigT::InitializeCrossingMapForMemory(pool.GetMem(), arena_.GetSize());
    LOG_BUMP_ALLOCATOR(DEBUG) << "Initialization of BumpPointerAllocator finished";
    ASSERT(UseTlabs ? tlabs_max_count > 0 : tlabs_max_count == 0);
}
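/**
 * The destructor only logs; the arena memory itself is presumably returned to
 * its pool elsewhere (e.g. via VisitAndRemoveAllPools).
 */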
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::~BumpPointerAllocator()
{
    LOG_BUMP_ALLOCATOR(DEBUG) << "Destroying BumpPointerAllocator";
    LOG_BUMP_ALLOCATOR(DEBUG) << "Destruction of BumpPointerAllocator finished";
}
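/**
 * Resets the arena (and the TLAB manager, if TLABs are used) to an empty state.
 * When anything was allocated, the CrossingMap for the arena is removed and
 * re-initialized so that subsequent Alloc calls start from a clean map.
 */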
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::Reset()
{
    // Remove the CrossingMap and re-create it to avoid an extra check in the Alloc method
    if (LIKELY(arena_.GetOccupiedSize() > 0)) {
        AllocConfigT::RemoveCrossingMapForMemory(arena_.GetMem(), arena_.GetSize());
        AllocConfigT::InitializeCrossingMapForMemory(arena_.GetMem(), arena_.GetSize());
    }
    arena_.Reset();
    if constexpr (UseTlabs) {
        tlab_manager_.Reset();
    }
}
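/**
 * Appends `size` bytes of memory to the arena; the new block must begin exactly
 * at the current arena end. Expansion is unsupported in TLAB mode, presumably
 * because TLAB buffers are addressed relative to the arena end, which would move.
 */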
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::ExpandMemory(void *mem, size_t size)
{
    LOG_BUMP_ALLOCATOR(DEBUG) << "Expand memory: Add " << std::dec << size << " bytes of memory at addr " << std::hex
                              << mem;
    ASSERT(ToUintPtr(arena_.GetArenaEnd()) == ToUintPtr(mem));
    if constexpr (UseTlabs) {
        UNREACHABLE();
    }
    arena_.ExpandArena(mem, size);
    AllocConfigT::InitializeCrossingMapForMemory(mem, size);
}
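/**
 * Bump-allocates `size` bytes (DEFAULT_ALIGNMENT only). The size is aligned up
 * before allocation so that MemStats records the real footprint; with UseTlabs,
 * the space already reserved for TLABs at the arena top is subtracted from the
 * free size before the allocation is attempted. Returns nullptr on exhaustion.
 *
 * For example, assuming DEFAULT_ALIGNMENT_IN_BYTES == 8 (defined elsewhere):
 * a request for 13 bytes is rounded up to AlignUp(13, 8) == 16 bytes.
 */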
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
void *BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::Alloc(size_t size, Alignment alignment)
{
    os::memory::LockHolder lock(allocator_lock_);
    LOG_BUMP_ALLOCATOR(DEBUG) << "Try to allocate " << std::dec << size << " bytes of memory";
    ASSERT(alignment == DEFAULT_ALIGNMENT);
    // We need to align the size up here to record the correct used memory size inside MemStats
    // (each element allocated via the bump-pointer allocator has DEFAULT_ALIGNMENT alignment).
    size = AlignUp(size, DEFAULT_ALIGNMENT_IN_BYTES);
    void *mem = nullptr;
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (!UseTlabs) {
        // Common scenario: bump-allocate directly from the arena
        mem = arena_.Alloc(size, alignment);
        // NOLINTNEXTLINE(readability-misleading-indentation)
    } else {
        // We must take the memory occupied by TLABs into account.
        ASSERT(arena_.GetFreeSize() >= tlab_manager_.GetTLABsOccupiedSize());
        if (arena_.GetFreeSize() - tlab_manager_.GetTLABsOccupiedSize() >= size) {
            mem = arena_.Alloc(size, alignment);
        }
    }
    if (mem == nullptr) {
        LOG_BUMP_ALLOCATOR(DEBUG) << "Couldn't allocate memory";
        return nullptr;
    }
    AllocConfigT::OnAlloc(size, type_allocation_, mem_stats_);
    AllocConfigT::AddToCrossingMap(mem, size);
    AllocConfigT::MemoryInit(mem, size);
    return mem;
}
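/**
 * Creates a TLAB of `size` bytes (the size must already be DEFAULT_ALIGNMENT-aligned).
 * TLAB buffers are carved from the arena end and grow downwards: each new buffer
 * starts at arena end minus the total TLAB-occupied size. Returns nullptr when
 * space runs out or the TLAB instance limit is reached; must not be called when
 * UseTlabs is false.
 */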
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
TLAB *BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::CreateNewTLAB(size_t size)
{
    os::memory::LockHolder lock(allocator_lock_);
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (!UseTlabs) {
        UNREACHABLE();
    }
    LOG_BUMP_ALLOCATOR(DEBUG) << "Try to create a TLAB with size " << std::dec << size;
    ASSERT(size == AlignUp(size, DEFAULT_ALIGNMENT_IN_BYTES));
    TLAB *tlab = nullptr;
    ASSERT(arena_.GetFreeSize() >= tlab_manager_.GetTLABsOccupiedSize());
    if (arena_.GetFreeSize() - tlab_manager_.GetTLABsOccupiedSize() >= size) {
        tlab = tlab_manager_.GetUnusedTLABInstance();
        if (tlab != nullptr) {
            tlab_manager_.IncreaseTLABsOccupiedSize(size);
            uintptr_t end_of_arena = ToUintPtr(arena_.GetArenaEnd());
            ASSERT(end_of_arena >= tlab_manager_.GetTLABsOccupiedSize());
            void *tlab_buffer_start = ToVoidPtr(end_of_arena - tlab_manager_.GetTLABsOccupiedSize());
            ASAN_UNPOISON_MEMORY_REGION(tlab_buffer_start, size);
            AllocConfigT::MemoryInit(tlab_buffer_start, size);
            tlab->Fill(tlab_buffer_start, size);
            LOG_BUMP_ALLOCATOR(DEBUG) << "Created new TLAB with size " << std::dec << size << " at addr " << std::hex
                                      << tlab_buffer_start;
        } else {
            LOG_BUMP_ALLOCATOR(DEBUG) << "Reached the limit of TLABs inside the allocator";
        }
    } else {
        LOG_BUMP_ALLOCATOR(DEBUG) << "Don't have enough memory for a new TLAB with size " << std::dec << size;
    }
    return tlab;
}
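/**
 * Removes the CrossingMap for the whole arena and passes the arena memory to
 * `mem_visitor`, which is presumably responsible for returning it to its pool.
 */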
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::VisitAndRemoveAllPools(const MemVisitor &mem_visitor)
{
    os::memory::LockHolder lock(allocator_lock_);
    AllocConfigT::RemoveCrossingMapForMemory(arena_.GetMem(), arena_.GetSize());
    mem_visitor(arena_.GetMem(), arena_.GetSize());
}
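/**
 * Intentionally empty: the allocator occupies a single arena, so there are no
 * separately freeable pools to visit.
 */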
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::VisitAndRemoveFreePools(const MemVisitor &mem_visitor)
{
    (void)mem_visitor;
    os::memory::LockHolder lock(allocator_lock_);
    // Intentionally a no-op
}
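/**
 * Visits every object allocated via Alloc by walking the arena linearly and
 * advancing by each object's aligned size; with UseTlabs, the objects inside
 * every TLAB are visited as well.
 */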
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::IterateOverObjects(
    const std::function<void(ObjectHeader *object_header)> &object_visitor)
{
    os::memory::LockHolder lock(allocator_lock_);
    LOG_BUMP_ALLOCATOR(DEBUG) << "Iteration over objects started";
    void *cur_ptr = arena_.GetAllocatedStart();
    void *end_ptr = arena_.GetAllocatedEnd();
    while (cur_ptr < end_ptr) {
        auto object_header = static_cast<ObjectHeader *>(cur_ptr);
        size_t object_size = GetObjectSize(cur_ptr);
        object_visitor(object_header);
        cur_ptr = ToVoidPtr(AlignUp(ToUintPtr(cur_ptr) + object_size, DEFAULT_ALIGNMENT_IN_BYTES));
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (UseTlabs) {
        LOG_BUMP_ALLOCATOR(DEBUG) << "Iterate over TLABs";
        // Iterate over objects in TLABs:
        tlab_manager_.IterateOverTLABs([&](TLAB *tlab) {
            tlab->IterateOverObjects(object_visitor);
            return true;
        });
        LOG_BUMP_ALLOCATOR(DEBUG) << "Iterate over TLABs finished";
    }
    LOG_BUMP_ALLOCATOR(DEBUG) << "Iteration over objects finished";
}
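/**
 * Visits objects intersecting [left_border, right_border]. The range is
 * currently required to cover exactly one CrossingMap granule (see the asserts
 * below); the first object is located via the CrossingMap and the walk then
 * proceeds linearly, while TLAB-resident objects are handled by the TLABs'
 * own range iteration.
 */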
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
template <typename MemVisitor>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::IterateOverObjectsInRange(const MemVisitor &mem_visitor,
                                                                                          void *left_border,
                                                                                          void *right_border)
{
    ASSERT(ToUintPtr(right_border) >= ToUintPtr(left_border));
    // TODO(ipetrov): These are temporary asserts because we can't do anything
    // if the range crosses different allocators' memory pools
    ASSERT(ToUintPtr(right_border) - ToUintPtr(left_border) ==
           (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(right_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(left_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));

    os::memory::LockHolder lock(allocator_lock_);
    LOG_BUMP_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange for range [" << std::hex << left_border << ", "
                              << right_border << "]";
    MemRange input_mem_range(ToUintPtr(left_border), ToUintPtr(right_border));
    MemRange arena_occupied_mem_range(0U, 0U);
    if (arena_.GetOccupiedSize() > 0) {
        arena_occupied_mem_range =
            MemRange(ToUintPtr(arena_.GetAllocatedStart()), ToUintPtr(arena_.GetAllocatedEnd()) - 1);
    }
    // Iterate over objects in the intersection of the memory occupied via arena_.Alloc()
    // and the input range
    if (arena_occupied_mem_range.IsIntersect(input_mem_range)) {
        void *start_ptr =
            ToVoidPtr(std::max(input_mem_range.GetStartAddress(), arena_occupied_mem_range.GetStartAddress()));
        void *end_ptr = ToVoidPtr(std::min(input_mem_range.GetEndAddress(), arena_occupied_mem_range.GetEndAddress()));

        void *obj_addr = AllocConfigT::FindFirstObjInCrossingMap(start_ptr, end_ptr);
        if (obj_addr != nullptr) {
            ASSERT(arena_occupied_mem_range.GetStartAddress() <= ToUintPtr(obj_addr) &&
                   ToUintPtr(obj_addr) <= arena_occupied_mem_range.GetEndAddress());
            void *current_ptr = obj_addr;
            while (current_ptr < end_ptr) {
                auto *object_header = static_cast<ObjectHeader *>(current_ptr);
                size_t object_size = GetObjectSize(current_ptr);
                mem_visitor(object_header);
                current_ptr = ToVoidPtr(AlignUp(ToUintPtr(current_ptr) + object_size, DEFAULT_ALIGNMENT_IN_BYTES));
            }
        }
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (UseTlabs) {
        // If no TLABs were allocated, we don't need to iterate over them
        if (tlab_manager_.GetTLABsOccupiedSize() == 0) {
            return;
        }
        uintptr_t end_of_arena = ToUintPtr(arena_.GetArenaEnd());
        uintptr_t start_tlab = end_of_arena - tlab_manager_.GetTLABsOccupiedSize();
        MemRange tlabs_mem_range(start_tlab, end_of_arena - 1);
        // Iterate over objects in the intersection of the TLABs' memory range
        // and the input range
        if (tlabs_mem_range.IsIntersect(input_mem_range)) {
            void *start_ptr = ToVoidPtr(std::max(input_mem_range.GetStartAddress(), tlabs_mem_range.GetStartAddress()));
            void *end_ptr = ToVoidPtr(std::min(input_mem_range.GetEndAddress(), tlabs_mem_range.GetEndAddress()));
            tlab_manager_.IterateOverTLABs(
                [&mem_visitor, mem_range = MemRange(ToUintPtr(start_ptr), ToUintPtr(end_ptr))](TLAB *tlab) -> bool {
                    tlab->IterateOverObjectsInRange(mem_visitor, mem_range);
                    return true;
                });
        }
    }
}
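/**
 * Returns the memory range from the allocated start of the arena up to the last
 * byte of the arena, so any TLAB space at the top is included.
 */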
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
MemRange BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::GetMemRange()
{
    return MemRange(ToUintPtr(arena_.GetAllocatedStart()), ToUintPtr(arena_.GetArenaEnd()) - 1);
}
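/**
 * Applies `object_move_visitor` to every object that `death_checker` reports as
 * alive; dead objects are skipped, and (per the comment below) the arena is
 * expected to be cleaned up separately afterwards.
 */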
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
template <typename ObjectMoveVisitorT>
void BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::CollectAndMove(
    const GCObjectVisitor &death_checker, const ObjectMoveVisitorT &object_move_visitor)
{
    IterateOverObjects([&](ObjectHeader *object_header) {
        // We are only interested in moving live objects; the arena is cleaned up afterwards
        if (death_checker(object_header) == ObjectStatus::ALIVE_OBJECT) {
            object_move_visitor(object_header);
        }
    });
}
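/**
 * Returns true if `obj` lies in the bump-allocated part of the arena or, when
 * UseTlabs is enabled, inside one of the TLAB buffers.
 */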
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
bool BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::ContainObject(const ObjectHeader *obj)
{
    bool result = arena_.InArena(const_cast<ObjectHeader *>(obj));
    if ((UseTlabs) && (!result)) {
        // Check TLABs
        tlab_manager_.IterateOverTLABs([&](TLAB *tlab) {
            result = tlab->ContainObject(obj);
            return !result;
        });
    }
    return result;
}
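/**
 * Returns true only if `obj` is the exact start of an object known to this
 * allocator: for arena memory the walk starts at the first object reported by
 * the CrossingMap and must land precisely on `obj`; for TLABs each contained
 * object is compared against `obj` directly.
 */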
template <typename AllocConfigT, typename LockConfigT, bool UseTlabs>
bool BumpPointerAllocator<AllocConfigT, LockConfigT, UseTlabs>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj));
    void *obj_mem = static_cast<void *>(const_cast<ObjectHeader *>(obj));
    if (arena_.InArena(obj_mem)) {
        void *current_obj = AllocConfigT::FindFirstObjInCrossingMap(obj_mem, obj_mem);
        if (UNLIKELY(current_obj == nullptr)) {
            return false;
        }
        while (current_obj < obj_mem) {
            size_t object_size = GetObjectSize(current_obj);
            current_obj = ToVoidPtr(AlignUp(ToUintPtr(current_obj) + object_size, DEFAULT_ALIGNMENT_IN_BYTES));
        }
        return current_obj == obj_mem;
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (UseTlabs) {
        bool result = false;
        tlab_manager_.IterateOverTLABs([&](TLAB *tlab) {
            if (tlab->ContainObject(obj)) {
                tlab->IterateOverObjects([&](ObjectHeader *object_header) {
                    if (object_header == obj) {
                        result = true;
                    }
                });
                return false;
            }
            return true;
        });
        return result;
    }
    return false;
}

#undef LOG_BUMP_ALLOCATOR

} // namespace panda::mem

#endif // RUNTIME_MEM_PANDA_BUMP_ALLOCATOR_INL_H