/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H_
#define PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H_

#include <securec.h>
#include "libpandabase/utils/asan_interface.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/runslots_allocator.h"

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_RUNSLOTS_ALLOCATOR(level) LOG(level, ALLOC) << "RunSlotsAllocator: "

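// Overview (added commentary): a RunSlots is a fixed-size block of RUNSLOTS_SIZE bytes
// that serves slots of a single power-of-two size class. The allocator keeps one
// intrusive RunSlotsList per size class in runslots_[], a free_runslots_ list of
// completely empty RunSlots that can be reused for any size class, and a
// MemPoolManager (memory_pool_) that carves new RunSlots blocks out of the pools
// added via AddMemoryPool().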
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsAllocator(MemStatsType *mem_stats,
                                                                       SpaceType type_allocation)
    : type_allocation_(type_allocation), mem_stats_(mem_stats)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator";
    LOG_RUNSLOTS_ALLOCATOR(INFO) << "Initializing RunSlotsAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::~RunSlotsAllocator()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator";
    LOG_RUNSLOTS_ALLOCATOR(INFO) << "Destroying RunSlotsAllocator finished";
}

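// How Alloc() picks a size class (added commentary): the requested size is first
// bumped up to the alignment in bytes, then rounded up to the next power of two;
// the exponent is the index into runslots_[] and 1UL << exponent is the slot size.
// A worked example, assuming ConvertToPowerOfTwoUnsafe() returns ceil(log2(size))
// as its use below implies:
//
//   Alloc(20, align) with GetAlignmentInBytes(align) == 8
//     -> size stays 20 (8 <= 20)
//     -> slot_size_power_of_two = 5, run_slot_size = 1UL << 5 = 32
//     -> the object is served from a RunSlots of 32-byte slots via runslots_[5]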
template <typename AllocConfigT, typename LockConfigT>
template <bool disable_use_free_runslots>
inline void *RunSlotsAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to allocate " << size << " bytes of memory with align " << align;
    if (size == 0) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is zero";
        return nullptr;
    }
    size_t alignment_size = GetAlignmentInBytes(align);
    if (alignment_size > size) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Increase the allocation size to " << alignment_size
                                      << " bytes because of the alignment requirement";
        size = alignment_size;
    }
    if (size > RunSlotsType::MaxSlotSize()) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is too big";
        return nullptr;
    }
    size_t slot_size_power_of_two = RunSlotsType::ConvertToPowerOfTwoUnsafe(size);
    size_t array_index = slot_size_power_of_two;
    const size_t run_slot_size = 1UL << slot_size_power_of_two;
    RunSlotsType *runslots = nullptr;
    bool used_from_freed_runslots_list = false;
    {
        os::memory::LockHolder list_lock(*runslots_[array_index].GetLock());
        runslots = runslots_[array_index].PopFromHead();
    }
    if (runslots == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "We don't have free RunSlots for size " << run_slot_size
                                      << ". Try to get a new one.";
        if (disable_use_free_runslots) {
            return nullptr;
        }
        {
            os::memory::LockHolder list_lock(*free_runslots_.GetLock());
            runslots = free_runslots_.PopFromHead();
        }
        if (runslots != nullptr) {
            used_from_freed_runslots_list = true;
            LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Got a RunSlots from the free list";
        } else {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "Failed to get a new RunSlots from the free list, try to allocate one from memory";
            runslots = CreateNewRunSlotsFromMemory(run_slot_size);
            if (runslots == nullptr) {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate an object, couldn't create a RunSlots";
                return nullptr;
            }
        }
    }
    void *allocated_mem = nullptr;
    {
        os::memory::LockHolder runslots_lock(*runslots->GetLock());
        if (used_from_freed_runslots_list) {
            // This reinitialization can be a perf issue. Maybe it is better to delete free_runslots_?
            if (runslots->GetSlotsSize() != run_slot_size) {
                runslots->Initialize(run_slot_size, runslots->GetPoolPointer(), false);
            }
        }
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Used runslots with addr " << std::hex << runslots;
        allocated_mem = static_cast<void *>(runslots->PopFreeSlot());
        if (allocated_mem == nullptr) {
            UNREACHABLE();
        }
        LOG_RUNSLOTS_ALLOCATOR(INFO) << "Allocated memory at address " << std::hex << allocated_mem;
        if (!runslots->IsFull()) {
            os::memory::LockHolder list_lock(*runslots_[array_index].GetLock());
            // We didn't take the last free slot from this RunSlots
            runslots_[array_index].PushToTail(runslots);
        }
        ASAN_UNPOISON_MEMORY_REGION(allocated_mem, size);
        AllocConfigT::OnAlloc(run_slot_size, type_allocation_, mem_stats_);
        AllocConfigT::MemoryInit(allocated_mem, size);
    }
    return allocated_mem;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Free(void *mem)
{
    FreeUnsafe<true>(mem);
}
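
// Illustrative usage sketch (added commentary; hypothetical driver code, not part of
// this header). Assumes a concrete AllocConfigT/LockConfigT pair, a MemStatsType
// instance, a pool of MIN_POOL_SIZE bytes obtained elsewhere, and that
// DEFAULT_ALIGNMENT and SpaceType::SPACE_TYPE_OBJECT are available here:
//
//   RunSlotsAllocator<AllocConfig, LockConfig> allocator(mem_stats, SpaceType::SPACE_TYPE_OBJECT);
//   allocator.AddMemoryPool(pool_mem, MIN_POOL_SIZE);
//   void *obj = allocator.Alloc(sizeof(MyObject), DEFAULT_ALIGNMENT);
//   if (obj != nullptr) {
//       // ... use obj ...
//       allocator.Free(obj);
//   }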

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::ReleaseEmptyRunSlotsPagesUnsafe()
{
    // Iterate over free_runslots list:
    RunSlotsType *cur_free_runslots = nullptr;
    {
        os::memory::LockHolder list_lock(*free_runslots_.GetLock());
        cur_free_runslots = free_runslots_.PopFromHead();
    }
    while (cur_free_runslots != nullptr) {
        memory_pool_.ReturnAndReleaseRunSlotsMemory(cur_free_runslots);

        {
            os::memory::LockHolder list_lock(*free_runslots_.GetLock());
            cur_free_runslots = free_runslots_.PopFromHead();
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafeInternal(RunSlotsType *runslots, void *mem)
{
    bool need_to_add_to_free_list = false;
    const size_t run_slot_size = runslots->GetSlotsSize();
    size_t array_index = RunSlotsType::ConvertToPowerOfTwoUnsafe(run_slot_size);
    bool runslots_was_full = runslots->IsFull();
    runslots->PushFreeSlot(static_cast<FreeSlot *>(mem));
    /**
     * RunSlotsAllocator doesn't know the real size of the object stored in the slot,
     * so we record an upper bound - the size of the slot.
     */
    AllocConfigT::OnFree(run_slot_size, type_allocation_, mem_stats_);
    ASAN_POISON_MEMORY_REGION(mem, run_slot_size);
    ASSERT(!(runslots_was_full && runslots->IsEmpty()));  // A RunSlots has more than one slot inside.
    if (runslots_was_full) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This RunSlots was full and now we must add it to the RunSlots list";

        os::memory::LockHolder list_lock(*runslots_[array_index].GetLock());
#if !defined(FAST_VERIFY)  // This assert is very expensive and takes too much time for some tests in FastVerify mode
        ASSERT(!runslots_[array_index].IsInThisList(runslots));
#endif
        runslots_[array_index].PushToTail(runslots);
    } else if (runslots->IsEmpty()) {
        os::memory::LockHolder list_lock(*runslots_[array_index].GetLock());
        // Check that this runslots is still in the list - an allocating thread may have
        // popped it while we were waiting for the lock
        if ((runslots->GetNextRunSlots() != nullptr) || (runslots->GetPrevRunSlots() != nullptr) ||
            (runslots_[array_index].GetHead() == runslots)) {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "This RunSlots is empty. Pop it from the runslots list and push it to free_runslots_";
            runslots_[array_index].PopFromList(runslots);
            need_to_add_to_free_list = true;
        }
    }

    return need_to_add_to_free_list;
}

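// How FreeUnsafe() finds the owning RunSlots header (added commentary): clearing the
// low RUNSLOTS_ALIGNMENT bits of the object address rounds it down to the start of
// the RunSlots block that contains it. Illustrative numbers, assuming
// RUNSLOTS_ALIGNMENT == 12 (4 KiB blocks) purely for the example:
//
//   mem           = 0x7f3a00005a70
//   runslots_addr = (mem >> 12) << 12 = 0x7f3a00005000  // the RunSlots header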
template <typename AllocConfigT, typename LockConfigT>
template <bool LockRunSlots>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafe(void *mem)
{
    if (UNLIKELY(mem == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free memory at invalid addr 0";
        return;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free object at address " << std::hex << mem;
#ifndef NDEBUG
    if (!AllocatedByRunSlotsAllocatorUnsafe(mem)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This object was not allocated by this allocator";
        return;
    }
#endif  // !NDEBUG

    // Now we are 100% sure that this object was allocated by the RunSlots allocator.
    // We can just align this address down and get a pointer to the RunSlots header
    uintptr_t runslots_addr = (ToUintPtr(mem) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslots_addr));
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots with addr " << std::hex << static_cast<void *>(runslots);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (LockRunSlots) {
        runslots->GetLock()->Lock();
    }

    bool need_to_add_to_free_list = FreeUnsafeInternal(runslots, mem);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (LockRunSlots) {
        runslots->GetLock()->Unlock();
    }

    if (need_to_add_to_free_list) {
        os::memory::LockHolder list_lock(*free_runslots_.GetLock());
        free_runslots_.PushToTail(runslots);
    }
    LOG_RUNSLOTS_ALLOCATOR(INFO) << "Freed object at address " << std::hex << mem;
}

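// Note on locking (added commentary): Collect() frees dead objects via
// FreeUnsafe<false>(), i.e. without re-taking the per-RunSlots lock, because
// MemPoolManager::IterateOverObjects() already holds that lock while visiting the
// occupied slots of each RunSlots; taking it again here would self-deadlock.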
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Collect(const GCObjectVisitor &death_checker_fn)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator started";
    IterateOverObjects([&](ObjectHeader *object_header) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " iterate over " << std::hex << object_header;
        if (death_checker_fn(object_header) == ObjectStatus::DEAD_OBJECT) {
            LOG(DEBUG, GC) << "DELETE OBJECT " << GetDebugInfoAboutObject(object_header);
            FreeUnsafe<false>(object_header);
        }
    });
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjects(const ObjectVisitor &object_visitor)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects started";
    memory_pool_.IterateOverObjects(object_visitor);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocator(void *object)
{
    return AllocatedByRunSlotsAllocatorUnsafe(object);
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocatorUnsafe(void *object)
{
    return memory_pool_.IsInMemPools(object);
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::CreateNewRunSlotsFromMemory(size_t slots_size)
{
    RunSlotsType *runslots = memory_pool_.GetNewRunSlots(slots_size);
    if (runslots != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Took " << RUNSLOTS_SIZE << " bytes of memory for a new RunSlots instance at "
                                      << std::hex << runslots;
        return runslots;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "There is no free memory for a RunSlots";
    return runslots;
}

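// Pool size constraint (added commentary): AddMemoryPool() below rejects pools larger
// than MIN_POOL_SIZE because the per-pool freed_runslots_bitmap_ only covers one
// MIN_POOL_SIZE-sized range (see the reference to #4018). Hypothetical calls:
//
//   allocator.AddMemoryPool(pool_mem, MIN_POOL_SIZE);       // accepted
//   allocator.AddMemoryPool(pool_mem, 2U * MIN_POOL_SIZE);  // rejected, returns false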
template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AddMemoryPool(void *mem, size_t size)
{
    LOG_RUNSLOTS_ALLOCATOR(INFO) << "Got new memory pool with size " << size << " bytes, at addr " << std::hex << mem;
    // Try to add this memory to the memory_pool_
    if (mem == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to add memory, the memory is nullptr";
        return false;
    }
    if (size > MIN_POOL_SIZE) {
        // The pool size must be equal to MIN_POOL_SIZE; it is required for the correct
        // freed_runslots_bitmap_ workflow. Fix it in #4018
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "Can't add new memory pool to this allocator because the memory size is greater than " << MIN_POOL_SIZE;
        return false;
    }
    if (!memory_pool_.AddNewMemoryPool(mem, size)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "Can't add new memory pool to this allocator. Maybe we already added too many memory pools.";
        return false;
    }
    return true;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveAllPools(const MemVisitor &mem_visitor)
{
    // We call this method to return the pools to the system.
    // Therefore, delete all objects to clear all external dependencies
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Clear all objects inside the allocator";
    memory_pool_.VisitAllPools(mem_visitor);
}

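// Ordering note (added commentary): VisitAndRemoveFreePools() first returns every
// cached empty RunSlots to its pool via ReleaseEmptyRunSlotsPagesUnsafe(); only after
// that can MemPoolManager::VisitAndRemoveFreePools() see pools with no used memory
// and hand them back to the visitor.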
template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
// CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_DECL_PARENTHESIS_PARAM_TYPE)
void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveFreePools(
    [[maybe_unused]] const MemVisitor &mem_visitor)
{
    ReleaseEmptyRunSlotsPagesUnsafe();
    // We need to remove the free RunSlots from the RunSlotsLists.
    // All of them must be inside the free_runslots_ list.
    memory_pool_.VisitAndRemoveFreePools(mem_visitor);
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjectsInRange(const MemVisitor &mem_visitor,
                                                                             void *left_border, void *right_border)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange for range [" << std::hex << left_border << ", "
                                  << right_border << "]";
    ASSERT(ToUintPtr(right_border) >= ToUintPtr(left_border));
    if (!AllocatedByRunSlotsAllocatorUnsafe(left_border)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This memory range is not covered by this allocator";
        return;
    }
    // Check that the range is exactly one crossing-map granule, i.e. it cannot cross
    // the memory pools of different allocators
    ASSERT(ToUintPtr(right_border) - ToUintPtr(left_border) ==
           (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(right_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(left_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));
    // Now we are 100% sure that left_border was allocated by the RunSlots allocator.
    // We can just align this address down and get a pointer to the RunSlots header
    uintptr_t runslots_addr = (ToUintPtr(left_border) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    while (runslots_addr < ToUintPtr(right_border)) {
        auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslots_addr));
        os::memory::LockHolder runslots_lock(*runslots->GetLock());
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange, It is RunSlots with addr " << std::hex
                                      << static_cast<void *>(runslots);
        runslots->IterateOverOccupiedSlots(mem_visitor);
        runslots_addr += RUNSLOTS_SIZE;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange finished";
}

template <typename AllocConfigT, typename LockConfigT>
size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::VerifyAllocator()
{
    size_t fail_cnt = 0;
    for (size_t i = 0; i < SLOTS_SIZES_VARIANTS; i++) {
        RunSlotsType *runslots = nullptr;
        {
            os::memory::LockHolder list_lock(*runslots_[i].GetLock());
            runslots = runslots_[i].GetHead();
        }
        if (runslots != nullptr) {
            os::memory::LockHolder runslots_lock(*runslots->GetLock());
            fail_cnt += runslots->VerifyRun();
        }
    }
    return fail_cnt;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::ContainObject(const ObjectHeader *obj)
{
    return AllocatedByRunSlotsAllocatorUnsafe(const_cast<ObjectHeader *>(obj));
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj));
    uintptr_t runslots_addr = (ToUintPtr(obj) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    auto run = static_cast<RunSlotsType *>(ToVoidPtr(runslots_addr));
    if (run->IsEmpty()) {
        return false;
    }
    return run->IsLive(obj);
}

template <typename AllocConfigT, typename LockConfigT>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::TrimUnsafe()
{
    // Release the pages of the runslots in the free list
    auto head = free_runslots_.GetHead();
    while (head != nullptr) {
        auto next = head->GetNextRunSlots();
        os::mem::ReleasePages(ToUintPtr(head), ToUintPtr(head) + RUNSLOTS_SIZE);
        head = next;
    }

    memory_pool_.VisitAllPoolsWithOccupiedSize([](void *mem, size_t used_size, size_t size) {
        uintptr_t start = AlignUp(ToUintPtr(mem) + used_size, panda::os::mem::GetPageSize());
        uintptr_t end = ToUintPtr(mem) + size;
        if (end >= start + panda::os::mem::GetPageSize()) {
            os::mem::ReleasePages(start, end);
        }
    });
}

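// RunSlotsList (added commentary): an intrusive doubly-linked list. The prev/next
// pointers live inside the RunSlots blocks themselves (SetPrevRunSlots/SetNextRunSlots),
// so pushing and popping never allocates. Invariants kept by the operations below:
// head_ == nullptr iff tail_ == nullptr, head_->prev == nullptr, tail_->next == nullptr.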
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PushToTail(RunSlotsType *runslots)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Push to tail RunSlots at addr " << std::hex << static_cast<void *>(runslots);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " tail_ " << std::hex << tail_;
    if (tail_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List was empty, set up head_ and tail_";
        // This means that head_ == nullptr too
        head_ = runslots;
        tail_ = runslots;
        return;
    }
    tail_->SetNextRunSlots(runslots);
    runslots->SetPrevRunSlots(tail_);
    tail_ = runslots;
    tail_->SetNextRunSlots(nullptr);
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromHead()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromHead";
    if (UNLIKELY(head_ == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List is empty, nothing to pop";
        return nullptr;
    }
    RunSlotsType *head_runslots = head_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " popped from head RunSlots " << std::hex << head_runslots;
    head_ = head_runslots->GetNextRunSlots();
    if (head_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " Now list is empty";
        // We popped the last element from the list
        tail_ = nullptr;
    } else {
        head_->SetPrevRunSlots(nullptr);
    }
    head_runslots->SetNextRunSlots(nullptr);
    return head_runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromTail()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromTail";
    if (UNLIKELY(tail_ == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List is empty, nothing to pop";
        return nullptr;
    }
    RunSlotsType *tail_runslots = tail_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " popped from tail RunSlots " << std::hex << tail_runslots;
    tail_ = tail_runslots->GetPrevRunSlots();
    if (tail_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " Now list is empty";
        // We popped the last element from the list
        head_ = nullptr;
    } else {
        tail_->SetNextRunSlots(nullptr);
    }
    tail_runslots->SetPrevRunSlots(nullptr);
    return tail_runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromList(RunSlotsType *runslots)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromList RunSlots with addr " << std::hex << runslots;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "head_ = " << std::hex << head_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "tail_ = " << std::hex << tail_;

    if (runslots == head_) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is the RunSlots at the head.";
        PopFromHead();
        return;
    }
    if (runslots == tail_) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is the RunSlots at the tail.";
        PopFromTail();
        return;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Remove the RunSlots from the middle of the list.";
    ASSERT(runslots != nullptr);
    RunSlotsType *next_runslots = runslots->GetNextRunSlots();
    RunSlotsType *previous_runslots = runslots->GetPrevRunSlots();
    ASSERT(next_runslots != nullptr);
    ASSERT(previous_runslots != nullptr);

    next_runslots->SetPrevRunSlots(previous_runslots);
    previous_runslots->SetNextRunSlots(next_runslots);
    runslots->SetNextRunSlots(nullptr);
    runslots->SetPrevRunSlots(nullptr);
}

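// MemPoolManager (added commentary): pools live in two intrusive lists. Freshly added
// pools hang off free_tail_; once a pool starts serving RunSlots it is moved to the
// occupied list ending at occupied_tail_. partially_occupied_head_ points at the first
// occupied pool that can still hand out RunSlots memory, so GetNewRunSlots() never
// scans pools that are already exhausted. Inside a pool, RunSlots blocks are carved
// out by bumping free_ptr_; blocks given back via ReturnAndReleaseRunSlotsMemory()
// are tracked in a per-pool bitmap for reuse.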
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::MemPoolManager()
{
    occupied_tail_ = nullptr;
    free_tail_ = nullptr;
    partially_occupied_head_ = nullptr;
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::GetNewRunSlots(size_t slots_size)
{
    os::memory::WriteLockHolder wlock(lock_);
    RunSlotsType *new_runslots = nullptr;
    if (partially_occupied_head_ != nullptr) {
        new_runslots = partially_occupied_head_->GetMemoryForRunSlots(slots_size);
        ASSERT(new_runslots != nullptr);
        if (UNLIKELY(!partially_occupied_head_->HasMemoryForRunSlots())) {
            partially_occupied_head_ = partially_occupied_head_->GetNext();
            ASSERT((partially_occupied_head_ == nullptr) || (partially_occupied_head_->HasMemoryForRunSlots()));
        }
    } else if (free_tail_ != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "MemPoolManager: occupied_tail_ doesn't have memory for RunSlots, get a new pool from the free pools";
        PoolListElement *free_element = free_tail_;
        free_tail_ = free_tail_->GetPrev();

        free_element->PopFromList();
        free_element->SetPrev(occupied_tail_);

        if (occupied_tail_ != nullptr) {
            ASSERT(occupied_tail_->GetNext() == nullptr);
            occupied_tail_->SetNext(free_element);
        }
        occupied_tail_ = free_element;

        if (partially_occupied_head_ == nullptr) {
            partially_occupied_head_ = occupied_tail_;
            ASSERT(partially_occupied_head_->HasMemoryForRunSlots());
        }

        ASSERT(occupied_tail_->GetNext() == nullptr);
        new_runslots = occupied_tail_->GetMemoryForRunSlots(slots_size);
        ASSERT(new_runslots != nullptr);
    }
    return new_runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::AddNewMemoryPool(void *mem, size_t size)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *new_pool = PoolListElement::Create(mem, size, free_tail_);
    if (free_tail_ != nullptr) {
        ASSERT(free_tail_->GetNext() == nullptr);
        free_tail_->SetNext(new_pool);
    }
    free_tail_ = new_pool;
    ASAN_POISON_MEMORY_REGION(mem, size);
    // Unpoison the header now so that we don't have to unpoison it on every access.
    ASAN_UNPOISON_MEMORY_REGION(mem, sizeof(PoolListElement));
    return true;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::ReturnAndReleaseRunSlotsMemory(
    RunSlotsType *runslots)
{
    os::memory::WriteLockHolder wlock(lock_);
    auto pool = static_cast<PoolListElement *>(ToVoidPtr(runslots->GetPoolPointer()));
    if (!pool->HasMemoryForRunSlots()) {
        ASSERT(partially_occupied_head_ != pool);
        // We should move this pool to the end of the occupied list
        if (pool != occupied_tail_) {
            pool->PopFromList();
            pool->SetPrev(occupied_tail_);
            if (UNLIKELY(occupied_tail_ == nullptr)) {
                UNREACHABLE();
            }
            occupied_tail_->SetNext(pool);
            occupied_tail_ = pool;
        } else {
            ASSERT(partially_occupied_head_ == nullptr);
        }
        if (partially_occupied_head_ == nullptr) {
            partially_occupied_head_ = occupied_tail_;
        }
    }

    pool->AddFreedRunSlots(runslots);
    ASSERT(partially_occupied_head_->HasMemoryForRunSlots());

    // Start address from which we can release pages
    uintptr_t start_addr = AlignUp(ToUintPtr(runslots), os::mem::GetPageSize());
    // End address before which we can release pages
    uintptr_t end_addr = os::mem::AlignDownToPageSize(ToUintPtr(runslots) + RUNSLOTS_SIZE);
    if (start_addr < end_addr) {
        os::mem::ReleasePages(start_addr, end_addr);
    }
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IsInMemPools(void *object)
{
    os::memory::ReadLockHolder rlock(lock_);
    PoolListElement *current = occupied_tail_;
    while (current != nullptr) {
        if (current->IsInUsedMemory(object)) {
            return true;
        }
        current = current->GetPrev();
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IterateOverObjects(
    const ObjectVisitor &object_visitor)
{
    PoolListElement *current_pool = nullptr;
    {
        os::memory::ReadLockHolder rlock(lock_);
        current_pool = occupied_tail_;
    }
    while (current_pool != nullptr) {
        current_pool->IterateOverRunSlots([&](RunSlotsType *runslots) {
            os::memory::LockHolder runslots_lock(*runslots->GetLock());
            ASSERT(runslots->GetPoolPointer() == ToUintPtr(current_pool));
            runslots->IterateOverOccupiedSlots(object_visitor);
            return true;
        });
        {
            os::memory::ReadLockHolder rlock(lock_);
            current_pool = current_pool->GetPrev();
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPools(const MemVisitor &mem_visitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *current_pool = occupied_tail_;
    while (current_pool != nullptr) {
        // Read the previous pool into tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        mem_visitor(current_pool->GetPoolMemory(), current_pool->GetSize());
        current_pool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPoolsWithOccupiedSize(
    const MemVisitor &mem_visitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *current_pool = occupied_tail_;
    while (current_pool != nullptr) {
        // Read the previous pool into tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        mem_visitor(current_pool->GetPoolMemory(), current_pool->GetOccupiedSize(), current_pool->GetSize());
        current_pool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAndRemoveFreePools(
    const MemVisitor &mem_visitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools inside RunSlotsAllocator";
    // First, iterate over the totally free pools:
    PoolListElement *current_pool = free_tail_;
    while (current_pool != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Visit free pool with addr " << std::hex
                                      << current_pool->GetPoolMemory() << " and size " << std::dec
                                      << current_pool->GetSize();
        // Read the previous pool into tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        mem_visitor(current_pool->GetPoolMemory(), current_pool->GetSize());
        current_pool = tmp;
    }
    free_tail_ = nullptr;
    // Second, try to find free pools among the occupied ones:
    current_pool = occupied_tail_;
    while (current_pool != nullptr) {
        // Read the previous pool into tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        if (!current_pool->HasUsedMemory()) {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "VisitAllFreePools: Visit occupied pool with addr " << std::hex << current_pool->GetPoolMemory()
                << " and size " << std::dec << current_pool->GetSize();
            // This pool doesn't have any occupied memory in its RunSlots.
            // Therefore, we can free it
            if (occupied_tail_ == current_pool) {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Update occupied_tail_";
                occupied_tail_ = current_pool->GetPrev();
            }
            if (current_pool == partially_occupied_head_) {
                partially_occupied_head_ = partially_occupied_head_->GetNext();
                ASSERT((partially_occupied_head_ == nullptr) || (partially_occupied_head_->HasMemoryForRunSlots()));
            }
            current_pool->PopFromList();
            mem_visitor(current_pool->GetPoolMemory(), current_pool->GetSize());
        }
        current_pool = tmp;
    }
}

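// PoolListElement layout (added commentary): the element header sits at the very start
// of the pool memory it manages; start_mem_ points past the header and RunSlots blocks
// are carved from GetFirstRunSlotsBlock(start_mem_) upwards:
//
//   pool_mem_                                                pool_mem_ + size_
//   | PoolListElement header | RunSlots | RunSlots | ... | unused          |
//                                                        ^ free_ptr_ bumps rightwards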
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PoolListElement()
{
    start_mem_ = 0;
    pool_mem_ = 0;
    size_ = 0;
    free_ptr_ = 0;
    prev_pool_ = nullptr;
    next_pool_ = nullptr;
    freeded_runslots_count_ = 0;
    (void)memset_s(storage_for_bitmap_.data(), sizeof(BitMapStorageType), 0, sizeof(BitMapStorageType));
}

template <typename AllocConfigT, typename LockConfigT>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PopFromList()
{
    if (next_pool_ != nullptr) {
        next_pool_->SetPrev(prev_pool_);
    }
    if (prev_pool_ != nullptr) {
        prev_pool_->SetNext(next_pool_);
    }
    next_pool_ = nullptr;
    prev_pool_ = nullptr;
}

template <typename AllocConfigT, typename LockConfigT>
uintptr_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFirstRunSlotsBlock(
    uintptr_t mem)
{
    return AlignUp(mem, 1UL << RUNSLOTS_ALIGNMENT);
}

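// Worked example for GetFirstRunSlotsBlock (added commentary, assuming
// RUNSLOTS_ALIGNMENT == 12 purely for illustration, i.e. 4 KiB blocks):
//
//   mem = pool_mem_ + sizeof(PoolListElement) = 0x7f3a00000140
//   AlignUp(mem, 1UL << 12)                   = 0x7f3a00001000
//
// so the first RunSlots block starts at the next block boundary after the pool header.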
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::Initialize(
    void *pool_mem, uintptr_t unoccupied_mem, size_t size, PoolListElement *prev)
{
    start_mem_ = unoccupied_mem;
    pool_mem_ = ToUintPtr(pool_mem);
    size_ = size;
    free_ptr_ = GetFirstRunSlotsBlock(start_mem_);
    prev_pool_ = prev;
    next_pool_ = nullptr;
    freeded_runslots_count_ = 0;
    freed_runslots_bitmap_.ReInitializeMemoryRange(pool_mem);
    ASSERT(freed_runslots_bitmap_.FindFirstMarkedChunks() == nullptr);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: first free RunSlots block = " << std::hex << free_ptr_;
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetMemoryForRunSlots(size_t slots_size)
{
    if (!HasMemoryForRunSlots()) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: There is no free memory for RunSlots";
        return nullptr;
    }
    RunSlotsType *runslots = GetFreedRunSlots(slots_size);
    if (runslots == nullptr) {
        uintptr_t old_mem = free_ptr_.load();
        ASSERT(pool_mem_ + size_ >= old_mem + RUNSLOTS_SIZE);

        // Initialize it first, before updating the free pointer,
        // because the memory becomes visible outside after that.
        runslots = static_cast<RunSlotsType *>(ToVoidPtr(old_mem));
        runslots->Initialize(slots_size, ToUintPtr(this), true);

        free_ptr_.fetch_add(RUNSLOTS_SIZE);
        ASSERT(free_ptr_.load() == (old_mem + RUNSLOTS_SIZE));
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: Took memory for RunSlots from addr " << std::hex
                                      << ToVoidPtr(old_mem)
                                      << ". New first free RunSlots block = " << ToVoidPtr(free_ptr_.load());
    }
    ASSERT(runslots != nullptr);
    return runslots;
}

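// Publication order (added commentary): free_ptr_ is an atomic that readers such as
// IterateOverRunSlots() and IsInUsedMemory() load without taking the manager lock.
// A new RunSlots block is therefore fully initialized *before* free_ptr_ is advanced
// past it, so a reader that observes the new free_ptr_ value never sees an
// uninitialized block.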
template <typename AllocConfigT, typename LockConfigT>
template <typename RunSlotsVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IterateOverRunSlots(
    const RunSlotsVisitor &runslots_visitor)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating over runslots inside pool with address " << std::hex << pool_mem_
                                  << " with size " << std::dec << size_ << " bytes";
    uintptr_t current_runslot = GetFirstRunSlotsBlock(start_mem_);
    uintptr_t last_runslot = free_ptr_.load();
    while (current_runslot < last_runslot) {
        ASSERT(start_mem_ <= current_runslot);
        if (!freed_runslots_bitmap_.AtomicTest(ToVoidPtr(current_runslot))) {
            auto cur_rs = static_cast<RunSlotsType *>(ToVoidPtr(current_runslot));
            LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating. Process RunSlots " << std::hex << cur_rs;
            if (!runslots_visitor(cur_rs)) {
                return;
            }
        }
        current_runslot += RUNSLOTS_SIZE;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating over runslots inside this pool finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasUsedMemory()
{
    uintptr_t current_runslot = GetFirstRunSlotsBlock(start_mem_);
    uintptr_t last_runslot = free_ptr_.load();
    while (current_runslot < last_runslot) {
        ASSERT(start_mem_ <= current_runslot);
        if (!freed_runslots_bitmap_.AtomicTest(ToVoidPtr(current_runslot))) {
            // We have a runslots instance that is in use somewhere.
            return true;
        }
        current_runslot += RUNSLOTS_SIZE;
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetOccupiedSize()
{
    if (!IsInitialized()) {
        return 0;
    }
    return free_ptr_.load() - pool_mem_;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IsInUsedMemory(void *object)
{
    uintptr_t mem_pointer = start_mem_;
    ASSERT(!((ToUintPtr(object) < GetFirstRunSlotsBlock(mem_pointer)) && (ToUintPtr(object) >= mem_pointer)));
    bool is_in_allocated_memory =
        (ToUintPtr(object) < free_ptr_.load()) && (ToUintPtr(object) >= GetFirstRunSlotsBlock(mem_pointer));
    return is_in_allocated_memory && !IsInFreedRunSlots(object);
}

template <typename AllocConfigT, typename LockConfigT>
typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFreedRunSlots(size_t slots_size)
{
    auto slots = static_cast<RunSlotsType *>(freed_runslots_bitmap_.FindFirstMarkedChunks());
    if (slots == nullptr) {
        ASSERT(freeded_runslots_count_ == 0);
        return nullptr;
    }

    // Initialize it first, before updating the bitmap,
    // because the memory becomes visible outside after that.
    slots->Initialize(slots_size, ToUintPtr(this), true);

    ASSERT(freeded_runslots_count_ > 0);
    [[maybe_unused]] bool old_val = freed_runslots_bitmap_.AtomicTestAndClear(slots);
    ASSERT(old_val);
    freeded_runslots_count_--;

    return slots;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasMemoryForRunSlots()
{
    if (!IsInitialized()) {
        return false;
    }
    bool has_free_memory = (free_ptr_.load() + RUNSLOTS_SIZE) <= (pool_mem_ + size_);
    bool has_freed_runslots = (freeded_runslots_count_ > 0);
    ASSERT(has_freed_runslots == (freed_runslots_bitmap_.FindFirstMarkedChunks() != nullptr));
    return has_free_memory || has_freed_runslots;
}

#undef LOG_RUNSLOTS_ALLOCATOR

}  // namespace panda::mem
#endif  // PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H_