/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_MEM_FREELIST_ALLOCATOR_INL_H_
#define PANDA_RUNTIME_MEM_FREELIST_ALLOCATOR_INL_H_

#include "libpandabase/utils/logger.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/mem/freelist_allocator.h"
#include "runtime/mem/object_helpers.h"

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_FREELIST_ALLOCATOR(level) LOG(level, ALLOC) << "FreeListAllocator: "

template <typename AllocConfigT, typename LockConfigT>
FreeListAllocator<AllocConfigT, LockConfigT>::FreeListAllocator(MemStatsType *mem_stats, SpaceType type_allocation)
    : type_allocation_(type_allocation), mem_stats_(mem_stats)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Initializing FreeListAllocator";
    ASAN_POISON_MEMORY_REGION(&segregated_list_, sizeof(segregated_list_));
    LOG_FREELIST_ALLOCATOR(INFO) << "Initializing FreeListAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
FreeListAllocator<AllocConfigT, LockConfigT>::~FreeListAllocator()
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Destroying FreeListAllocator";
    ASAN_UNPOISON_MEMORY_REGION(&segregated_list_, sizeof(segregated_list_));
    LOG_FREELIST_ALLOCATOR(INFO) << "Destroying FreeListAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
void *FreeListAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to allocate object with size " << std::dec << size;
    size_t alloc_size = size;
    if (alloc_size < FREELIST_ALLOCATOR_MIN_SIZE) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Requested size is less than the minimum for this allocator; round it up";
        alloc_size = FREELIST_ALLOCATOR_MIN_SIZE;
    }
    size_t def_aligned_size = AlignUp(alloc_size, GetAlignmentInBytes(FREELIST_DEFAULT_ALIGNMENT));
    if (def_aligned_size > alloc_size) {
        alloc_size = def_aligned_size;
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Align size to default alignment. New size = " << alloc_size;
    }
    if (alloc_size > FREELIST_MAX_ALLOC_SIZE) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Requested size is too big for the free list allocator. Return nullptr";
        return nullptr;
    }
    // Get best-fit memory piece from the segregated list.
    MemoryBlockHeader *memory_block = GetFromSegregatedList(alloc_size, align);
    if (memory_block == nullptr) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Couldn't allocate memory";
        return nullptr;
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Found memory block at addr = " << std::hex << memory_block << " with size "
                                  << std::dec << memory_block->GetSize();
    ASSERT(memory_block->GetSize() >= alloc_size);
    uintptr_t memory_pointer = ToUintPtr(memory_block->GetMemory());
    bool required_padding = false;
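    // Memory layout when extra alignment is required (sketch):
    //
    //   | MemoryBlockHeader | ... gap ... | padding header | aligned payload ... |
    //   ^ memory_block                                     ^ aligned memory_pointer
    //
    // The padding header, placed right before the aligned payload, points back to
    // memory_block, so GetFreeListMemoryHeader() can recover the real header on Free.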
    if ((memory_pointer & (GetAlignmentInBytes(align) - 1)) != 0U) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Raw memory is not aligned as we need. Create special header for padding";
        // The raw memory pointer is not aligned as we expected,
        // so we need to create an extra (padding) header inside the block.
        uintptr_t aligned_memory_pointer =
            AlignUp(memory_pointer + sizeof(MemoryBlockHeader), GetAlignmentInBytes(align));
        size_t size_with_padding = alloc_size + (aligned_memory_pointer - memory_pointer);
        ASSERT(memory_block->GetSize() >= size_with_padding);
        auto padding_header =
            static_cast<MemoryBlockHeader *>(ToVoidPtr(aligned_memory_pointer - sizeof(MemoryBlockHeader)));
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Created padding header at addr " << std::hex << padding_header;
        padding_header->Initialize(alloc_size, memory_block);
        padding_header->SetAsPaddingHeader();
        // Update values
        memory_pointer = aligned_memory_pointer;
        alloc_size = size_with_padding;
        required_padding = true;
    }
    if (CanCreateNewBlockFromRemainder(memory_block, alloc_size)) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Created new memory block from the remainder part:";
        MemoryBlockHeader *new_free_block = SplitMemoryBlocks(memory_block, alloc_size);
        LOG_FREELIST_ALLOCATOR(DEBUG) << "New block started at addr " << std::hex << new_free_block << " with size "
                                      << std::dec << new_free_block->GetSize();
        memory_block->SetUsed();
        FreeListHeader *new_free_list_element = TryToCoalescing(new_free_block);
        ASSERT(!new_free_list_element->IsUsed());
        AddToSegregatedList(new_free_list_element);
    } else {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Can't create new block from the remainder. Use the full block.";
        memory_block->SetUsed();
    }
    if (required_padding) {
        // Record in memory_block how the padding is stored, so the real header can be found later.
        uintptr_t padding_size = memory_pointer - ToUintPtr(memory_block->GetMemory());
        if (padding_size == sizeof(MemoryBlockHeader)) {
            LOG_FREELIST_ALLOCATOR(DEBUG) << "SetHasPaddingHeaderAfter";
            memory_block->SetPaddingHeaderStoredAfterHeader();
        } else {
            LOG_FREELIST_ALLOCATOR(DEBUG) << "SetHasPaddingSizeAfter, size = " << padding_size;
            memory_block->SetPaddingSizeStoredAfterHeader();
            memory_block->SetPaddingSize(padding_size);
        }
    }
    LOG_FREELIST_ALLOCATOR(INFO) << "Allocated memory at addr " << std::hex << ToVoidPtr(memory_pointer);
    {
        AllocConfigT::OnAlloc(memory_block->GetSize(), type_allocation_, mem_stats_);
        // This is not the object size itself, because we can't compute that from the
        // MemoryBlockHeader structure at the Free call. It is an approximation.
        size_t current_size =
            ToUintPtr(memory_block) + memory_block->GetSize() + sizeof(MemoryBlockHeader) - memory_pointer;
        AllocConfigT::AddToCrossingMap(ToVoidPtr(memory_pointer), current_size);
    }
    ASAN_UNPOISON_MEMORY_REGION(ToVoidPtr(memory_pointer), size);
    AllocConfigT::MemoryInit(ToVoidPtr(memory_pointer), size);
    return ToVoidPtr(memory_pointer);
}
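
// Usage sketch (illustrative only; the config/lock type names, pool source, and sizes
// below are assumptions, not part of this file):
//   FreeListAllocator<AllocConfig, LockConfig> allocator(mem_stats, SpaceType::SPACE_TYPE_OBJECT);
//   allocator.AddMemoryPool(pool_mem, pool_size);  // pool_mem must be aligned to sizeof(MemoryBlockHeader)
//   void *obj = allocator.Alloc(obj_size, FREELIST_DEFAULT_ALIGNMENT);
//   allocator.Free(obj);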

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::Free(void *mem)
{
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    FreeUnsafe(mem);
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::FreeUnsafe(void *mem)
{
    if (UNLIKELY(mem == nullptr)) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to free memory at invalid addr 0";
        return;
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to free memory at addr " << std::hex << mem;
#ifndef NDEBUG
    if (!AllocatedByFreeListAllocatorUnsafe(mem)) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to free memory not allocated by this allocator";
        return;
    }
#endif  // !NDEBUG

    MemoryBlockHeader *memory_header = GetFreeListMemoryHeader(mem);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "It is a memory with header " << std::hex << memory_header << " with size "
                                  << std::dec << memory_header->GetSize() << " (probably with padding)";
    {
        AllocConfigT::OnFree(memory_header->GetSize(), type_allocation_, mem_stats_);
        // This is not the object size itself, because we can't compute that from the
        // MemoryBlockHeader structure. It is an approximation.
        size_t current_size =
            ToUintPtr(memory_header) + memory_header->GetSize() + sizeof(MemoryBlockHeader) - ToUintPtr(mem);
        MemoryBlockHeader *prev_used_header = memory_header->GetPrevUsedHeader();
        void *prev_object = nullptr;
        size_t prev_size = 0;
        if (prev_used_header != nullptr) {
            prev_object = prev_used_header->GetMemory();
            prev_size = ToUintPtr(prev_used_header) + prev_used_header->GetSize() + sizeof(MemoryBlockHeader) -
                        ToUintPtr(prev_used_header->GetMemory());
        }
        MemoryBlockHeader *next_used_header = memory_header->GetNextUsedHeader();
        void *next_object = nullptr;
        if (next_used_header != nullptr) {
            next_object = next_used_header->GetMemory();
        }
        AllocConfigT::RemoveFromCrossingMap(mem, current_size, next_object, prev_object, prev_size);
    }
    memory_header->SetUnused();
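    // Merge this block with its free neighbors, then poison the whole coalesced block
    // (header included) before publishing it back to the segregated list.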
    FreeListHeader *new_free_list_element = TryToCoalescing(memory_header);
    ASAN_POISON_MEMORY_REGION(new_free_list_element, new_free_list_element->GetSize() + sizeof(MemoryBlockHeader));
    AddToSegregatedList(new_free_list_element);
    LOG_FREELIST_ALLOCATOR(INFO) << "Freed memory at addr " << std::hex << mem;
}

// but it requires pool alignment restriction
// (we must compute memory pool header addr from a memory block addr stored inside it)

// During a Collect call we iterate over memory blocks in each pool.
// This iteration can cause race conditions in multithreaded mode.
// E.g. in this scenario:
// |-------|---------|-------------------|------------------------------------------------------------------|
// | time: | Thread: |    Description:   |                         Memory footprint:                        |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread0 starts    |  |..............Free  Block.............|...Allocated block...|  |
// |       |         | iterating         |  |                                                               |
// |   0   |    0    | over mem blocks   |  current block pointer                                           |
// |       |         | and current block |                                                                  |
// |       |         | is free block     |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread1           |  |...Allocated block...|................|...Allocated block...|  |
// |   1   |    1    | allocates memory  |                        |                                         |
// |       |         | at this block     |               Unused memory piece                                |
// |       |         |                   |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread1           |  |...Allocated block...|................|...Allocated block...|  |
// |   2   |    1    | sets up values in |  |                                                               |
// |       |         | this block header |  change size of this block                                       |
// |       |         | (sets up size)    |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread0 reads     |  |...Allocated block...|................|...Allocated block...|  |
// |       |         | some garbage or   |                                                                  |
// |   3   |    0    | wrong value to    |  current block pointer - points to wrong memory                  |
// |       |         | calculate next    |                                                                  |
// |       |         | block pointer     |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
//
// Therefore, we must release the allocator's alloc/free lock only
// while we hold a pointer to an allocated block (i.e. IsUsed()).

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::Collect(const GCObjectVisitor &death_checker_fn)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Collecting started";
    IterateOverObjects([&](ObjectHeader *mem) {
        if (death_checker_fn(mem) == ObjectStatus::DEAD_OBJECT) {
            LOG(DEBUG, GC) << "DELETE OBJECT " << GetDebugInfoAboutObject(mem);
            FreeUnsafe(mem);
        }
    });
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Collecting finished";
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::IterateOverObjects(const ObjectVisitor &object_visitor)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Iterating over objects started";
    MemoryPoolHeader *current_pool = nullptr;
    {
        // Do this under lock because the mempool_tail_ pointer can be changed by other threads
        // in AddMemoryPool method calls.
        // NOTE: We add each new pool at mempool_tail_. Therefore, we can read it once and iterate towards the head.
        os::memory::ReadLockHolder rlock(alloc_free_lock_);
        current_pool = mempool_tail_;
    }
    while (current_pool != nullptr) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "  iterate over " << std::hex << current_pool;
        MemoryBlockHeader *current_mem_header = current_pool->GetFirstMemoryHeader();
        while (current_mem_header != nullptr) {
            // Lock against any possible memory header changes in the allocator.
            os::memory::WriteLockHolder wlock(alloc_free_lock_);
            if (current_mem_header->IsUsed()) {
                // Free can be called inside object_visitor; that's why we must lock everything.
                object_visitor(static_cast<ObjectHeader *>(current_mem_header->GetMemory()));
                // At this point, current_mem_header can point to a nonexistent memory block
                // (e.g., if we coalesced it with the previous free block).
                // However, the MemoryBlockHeader fields here are still valid.
                // NOTE: We first coalesce the current_mem_header block with the next one (if it is free)
                // and update the current block size. Therefore, we still have a valid pointer to the next block.

                // After calling object_visitor(), current_mem_header may point to a free block of memory,
                // which can be modified by an Alloc call in another thread.
                // Therefore, do not unlock until we have a pointer to a Used block.
                current_mem_header = current_mem_header->GetNextUsedHeader();
            } else {
                // The current header is marked as Unused, so it can be modified by an allocation in another thread.
                // So, read the next header while still holding the lock.
                current_mem_header = current_mem_header->GetNextUsedHeader();
            }
            // We now have a pointer to a Used memory block, or nullptr.
            // Therefore, we can unlock.
        }
        current_pool = current_pool->GetPrev();
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Iterating over objects finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::AddMemoryPool(void *mem, size_t size)
{
    // Lock alloc/free because we add a new block to the segregated list here.
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    ASSERT(mem != nullptr);
    LOG_FREELIST_ALLOCATOR(INFO) << "Add memory pool to FreeListAllocator from " << std::hex << mem << " with size "
                                 << std::dec << size;
    ASSERT((ToUintPtr(mem) & (sizeof(MemoryBlockHeader) - 1)) == 0U);
    auto mempool_header = static_cast<MemoryPoolHeader *>(mem);
    if (mempool_head_ == nullptr) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Initialize mempool_head_";
        mempool_header->Initialize(size, nullptr, nullptr);
        mempool_head_ = mempool_header;
        mempool_tail_ = mempool_header;
    } else {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Add this memory pool at the tail after block " << std::hex << mempool_tail_;
        mempool_header->Initialize(size, mempool_tail_, nullptr);
        mempool_tail_->SetNext(mempool_header);
        mempool_tail_ = mempool_header;
    }
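    // Pool layout after initialization (sketch):
    //   | MemoryPoolHeader | MemoryBlockHeader | free space ................ |
    // The single initial block spans the whole pool minus the two headers.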
    MemoryBlockHeader *first_mem_header = mempool_header->GetFirstMemoryHeader();
    first_mem_header->Initialize(size - sizeof(MemoryBlockHeader) - sizeof(MemoryPoolHeader), nullptr);
    first_mem_header->SetLastBlockInPool();
    AddToSegregatedList(static_cast<FreeListHeader *>(first_mem_header));
    ASAN_POISON_MEMORY_REGION(mem, size);
    AllocConfigT::InitializeCrossingMapForMemory(mem, size);
    return true;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveAllPools(const MemVisitor &mem_visitor)
{
    // We call this method when returning pools to the system.
    // Therefore, delete all objects to clear all external dependencies.
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Clear all objects inside the allocator";
    // Lock everything to avoid a race condition.
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    MemoryPoolHeader *current_pool = mempool_head_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects.
        MemoryPoolHeader *tmp = current_pool->GetNext();
        AllocConfigT::RemoveCrossingMapForMemory(current_pool, current_pool->GetSize());
        mem_visitor(current_pool, current_pool->GetSize());
        current_pool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
// CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_DECL_PARENTHESIS_PARAM_TYPE)
void FreeListAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveFreePools(const MemVisitor &mem_visitor)
{
    // Lock everything to avoid a race condition.
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    MemoryPoolHeader *current_pool = mempool_head_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects.
        MemoryPoolHeader *tmp = current_pool->GetNext();
        MemoryBlockHeader *first_block = current_pool->GetFirstMemoryHeader();
        if (first_block->IsLastBlockInPool() && !first_block->IsUsed()) {
            // We have only one big memory block in this pool,
            // and this block is not used.
            LOG_FREELIST_ALLOCATOR(DEBUG)
                << "VisitAndRemoveFreePools: Remove free memory pool from allocator with start addr " << std::hex
                << current_pool << " and size " << std::dec << current_pool->GetSize()
                << " bytes with the first block at addr " << std::hex << first_block << " and size " << std::dec
                << first_block->GetSize();
            auto free_header = static_cast<FreeListHeader *>(first_block);
            free_header->PopFromFreeList();
            MemoryPoolHeader *next = current_pool->GetNext();
            MemoryPoolHeader *prev = current_pool->GetPrev();
            if (next != nullptr) {
                ASSERT(next->GetPrev() == current_pool);
                next->SetPrev(prev);
            } else {
                // This means that the current pool is the last one.
                ASSERT(mempool_tail_ == current_pool);
                LOG_FREELIST_ALLOCATOR(DEBUG) << "VisitAndRemoveFreePools: Change pools tail pointer";
                mempool_tail_ = prev;
            }
            if (prev != nullptr) {
                ASSERT(prev->GetNext() == current_pool);
                prev->SetNext(next);
            } else {
                // This means that the current pool is the first one.
                ASSERT(mempool_head_ == current_pool);
                LOG_FREELIST_ALLOCATOR(DEBUG) << "VisitAndRemoveFreePools: Change pools head pointer";
                mempool_head_ = next;
            }
            AllocConfigT::RemoveCrossingMapForMemory(current_pool, current_pool->GetSize());
            mem_visitor(current_pool, current_pool->GetSize());
        }
        current_pool = tmp;
    }
    segregated_list_.ReleaseFreeMemoryBlocks();
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::IterateOverObjectsInRange(const MemVisitor &mem_visitor,
                                                                             void *left_border, void *right_border)
{
    // NOTE: The current implementation doesn't look at the PANDA_CROSSING_MAP_MANAGE_CROSSED_BORDER flag.
    LOG_FREELIST_ALLOCATOR(DEBUG) << "FreeListAllocator::IterateOverObjectsInRange for range [" << std::hex
                                  << left_border << ", " << right_border << "]";
    ASSERT(ToUintPtr(right_border) >= ToUintPtr(left_border));
    // These asserts guarantee that the range covers exactly one crossing-map granule,
    // so it cannot cross different allocators' memory pools.
    ASSERT(ToUintPtr(right_border) - ToUintPtr(left_border) ==
           (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(right_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(left_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));
    MemoryBlockHeader *first_memory_header = nullptr;
    {
        // Do this under lock because the pointer to the first object in the CrossingMap can be changed
        // during a concurrent CrossingMap call.
        os::memory::ReadLockHolder rlock(alloc_free_lock_);
        if (!AllocatedByFreeListAllocatorUnsafe(left_border) && !AllocatedByFreeListAllocatorUnsafe(right_border)) {
            LOG_FREELIST_ALLOCATOR(DEBUG) << "This memory range is not covered by this allocator";
            return;
        }
        void *obj_addr = AllocConfigT::FindFirstObjInCrossingMap(left_border, right_border);
        if (obj_addr == nullptr) {
            return;
        }
        ASSERT(AllocatedByFreeListAllocatorUnsafe(obj_addr));
        MemoryBlockHeader *memory_header = GetFreeListMemoryHeader(obj_addr);
        // The memory header points to an object which starts in this range or in the previous one.
        // In the second case, that object may not cross the border of the current range
        // (but there is an object stored after it which does cross the current range).
        ASSERT(ToUintPtr(memory_header->GetMemory()) <= ToUintPtr(right_border));
        first_memory_header = memory_header;
    }
    ASSERT(first_memory_header != nullptr);
    // Let's start the iteration:
    MemoryBlockHeader *current_mem_header = first_memory_header;
    LOG_FREELIST_ALLOCATOR(DEBUG) << "FreeListAllocator::IterateOverObjectsInRange start iterating from obj with addr "
                                  << std::hex << first_memory_header->GetMemory();
    while (current_mem_header != nullptr) {
        // We don't lock the allocator because we point to a used block, which can't be changed
        // during the iteration in range.
        void *obj_addr = current_mem_header->GetMemory();
        if (ToUintPtr(obj_addr) > ToUintPtr(right_border)) {
            // Iteration is over.
            break;
        }
        LOG_FREELIST_ALLOCATOR(DEBUG)
            << "FreeListAllocator::IterateOverObjectsInRange found obj in this range with addr " << std::hex
            << obj_addr;
        mem_visitor(static_cast<ObjectHeader *>(obj_addr));
        {
            os::memory::ReadLockHolder rlock(alloc_free_lock_);
            current_mem_header = current_mem_header->GetNextUsedHeader();
        }
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "FreeListAllocator::IterateOverObjectsInRange finished";
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::CoalesceMemoryBlocks(MemoryBlockHeader *first_block,
                                                                        MemoryBlockHeader *second_block)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "CoalesceMemoryBlock: "
                                  << "first block = " << std::hex << first_block << " with size " << std::dec
                                  << first_block->GetSize() << " ; second block = " << std::hex << second_block
                                  << " with size " << std::dec << second_block->GetSize();
    ASSERT(first_block->GetNextHeader() == second_block);
    ASSERT(first_block->CanBeCoalescedWithNext() || second_block->CanBeCoalescedWithPrev());
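    // Merge layout (sketch):
    //   | hdr1 | data1 | hdr2 | data2 |   ->   | hdr1 | data1 + hdr2 + data2 |
    // The merged block absorbs second_block's header as payload, hence the extra
    // sizeof(MemoryBlockHeader) in the new size.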
    first_block->Initialize(first_block->GetSize() + second_block->GetSize() + sizeof(MemoryBlockHeader),
                            first_block->GetPrevHeader());
    if (second_block->IsLastBlockInPool()) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "CoalesceMemoryBlock: second_block was the last in a pool";
        first_block->SetLastBlockInPool();
    } else {
        first_block->GetNextHeader()->SetPrevHeader(first_block);
    }
}

template <typename AllocConfigT, typename LockConfigT>
freelist::MemoryBlockHeader *FreeListAllocator<AllocConfigT, LockConfigT>::SplitMemoryBlocks(
    MemoryBlockHeader *memory_block, size_t first_block_size)
{
    ASSERT(memory_block->GetSize() > (first_block_size + sizeof(MemoryBlockHeader)));
    ASSERT(!memory_block->IsUsed());
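    // Split layout (sketch):
    //   | hdr | payload .............. |   ->   | hdr | first_block_size | new hdr | remainder |
    // The second header is carved out of the original payload, so the remainder
    // loses sizeof(MemoryBlockHeader) bytes.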
    auto second_block =
        static_cast<MemoryBlockHeader *>(ToVoidPtr(ToUintPtr(memory_block->GetMemory()) + first_block_size));
    size_t second_block_size = memory_block->GetSize() - first_block_size - sizeof(MemoryBlockHeader);
    second_block->Initialize(second_block_size, memory_block);
    if (memory_block->IsLastBlockInPool()) {
        second_block->SetLastBlockInPool();
    } else {
        second_block->GetNextHeader()->SetPrevHeader(second_block);
    }
    memory_block->Initialize(first_block_size, memory_block->GetPrevHeader());
    return second_block;
}

template <typename AllocConfigT, typename LockConfigT>
freelist::MemoryBlockHeader *FreeListAllocator<AllocConfigT, LockConfigT>::GetFreeListMemoryHeader(void *mem)
{
    ASSERT(mem != nullptr);
    auto memory_header = static_cast<MemoryBlockHeader *>(ToVoidPtr(ToUintPtr(mem) - sizeof(MemoryBlockHeader)));
    if (!memory_header->IsPaddingHeader()) {
        // We have the correct header of this memory; just return it.
        return memory_header;
    }
    // This is aligned memory with some free space before the memory pointer.
    // The previous header must point to the correct header of this memory block.
    LOG_FREELIST_ALLOCATOR(DEBUG) << "It is a memory with padding at head";
    return memory_header->GetPrevHeader();
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::AllocatedByFreeListAllocator(void *mem)
{
    os::memory::ReadLockHolder rlock(alloc_free_lock_);
    return AllocatedByFreeListAllocatorUnsafe(mem);
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::AllocatedByFreeListAllocatorUnsafe(void *mem)
{
    MemoryPoolHeader *current_pool = mempool_head_;
    while (current_pool != nullptr) {
        // This assert triggers if we are asked about memory inside a MemoryPoolHeader.
        ASSERT(!((ToUintPtr(current_pool->GetFirstMemoryHeader()) > ToUintPtr(mem)) &&
                 (ToUintPtr(current_pool) < ToUintPtr(mem))));
        if ((ToUintPtr(current_pool->GetFirstMemoryHeader()) < ToUintPtr(mem)) &&
            ((ToUintPtr(current_pool) + current_pool->GetSize()) > ToUintPtr(mem))) {
            return true;
        }
        current_pool = current_pool->GetNext();
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
freelist::FreeListHeader *FreeListAllocator<AllocConfigT, LockConfigT>::TryToCoalescing(
    MemoryBlockHeader *memory_header)
{
    ASSERT(memory_header != nullptr);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "TryToCoalescing memory block";
    if (memory_header->CanBeCoalescedWithNext()) {
        ASSERT(!memory_header->GetNextHeader()->IsUsed());
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Coalesce with next block";
        auto next_free_list = static_cast<FreeListHeader *>(memory_header->GetNextHeader());
        // Pop this free list element from the list
        next_free_list->PopFromFreeList();
        // Combine these two blocks together
        CoalesceMemoryBlocks(memory_header, static_cast<MemoryBlockHeader *>(next_free_list));
    }
    if (memory_header->CanBeCoalescedWithPrev()) {
        ASSERT(!memory_header->GetPrevHeader()->IsUsed());
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Coalesce with prev block";
        auto prev_free_list = static_cast<FreeListHeader *>(memory_header->GetPrevHeader());
        // Pop this free list element from the list
        prev_free_list->PopFromFreeList();
        // Combine these two blocks together
        CoalesceMemoryBlocks(static_cast<MemoryBlockHeader *>(prev_free_list), memory_header);
        memory_header = static_cast<MemoryBlockHeader *>(prev_free_list);
    }
    return static_cast<FreeListHeader *>(memory_header);
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::AddToSegregatedList(FreeListHeader *free_list_element)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "AddToSegregatedList: Add new block into segregated-list with size "
                                  << free_list_element->GetSize();
    segregated_list_.AddMemoryBlock(free_list_element);
}

template <typename AllocConfigT, typename LockConfigT>
freelist::MemoryBlockHeader *FreeListAllocator<AllocConfigT, LockConfigT>::GetFromSegregatedList(size_t size,
                                                                                                 Alignment align)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "GetFromSegregatedList: Try to find memory for size " << size << " with alignment "
                                  << align;
    size_t aligned_size = size;
    if (align != FREELIST_DEFAULT_ALIGNMENT) {
        // Reserve enough extra space to realign the payload and to store a padding header.
        // The tight bound would be
        // (GetAlignmentInBytes(align) + sizeof(MemoryBlockHeader) - GetAlignmentInBytes(FREELIST_DEFAULT_ALIGNMENT));
        // we conservatively add the full alignment plus a header.
        aligned_size += (GetAlignmentInBytes(align) + sizeof(MemoryBlockHeader));
    }
    FreeListHeader *mem_block = segregated_list_.FindMemoryBlock(aligned_size);
    if (mem_block != nullptr) {
        mem_block->PopFromFreeList();
        ASSERT((AlignUp(ToUintPtr(mem_block->GetMemory()), GetAlignmentInBytes(align)) -
                ToUintPtr(mem_block->GetMemory()) + size) <= mem_block->GetSize());
    }
    return static_cast<MemoryBlockHeader *>(mem_block);
}

template <typename AllocConfigT, typename LockConfigT>
ATTRIBUTE_NO_SANITIZE_ADDRESS void FreeListAllocator<AllocConfigT, LockConfigT>::MemoryPoolHeader::Initialize(
    size_t size, MemoryPoolHeader *prev, MemoryPoolHeader *next)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Init a new memory pool with size " << size << " with prev link = " << std::hex
                                  << prev << " and next link = " << next;
    ASAN_UNPOISON_MEMORY_REGION(this, sizeof(MemoryPoolHeader));
    size_ = size;
    prev_ = prev;
    next_ = next;
    ASAN_POISON_MEMORY_REGION(this, sizeof(MemoryPoolHeader));
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::AddMemoryBlock(FreeListHeader *freelist_header)
{
    size_t size = freelist_header->GetSize();
    size_t index = GetIndex(size);
    if (SEGREGATED_LIST_FAST_INSERT) {
        free_memory_blocks_[index].InsertNext(freelist_header);
    } else {
        FreeListHeader *most_suitable = FindTheMostSuitableBlockInOrderedList(index, size);
        // The most suitable block either has exactly this size,
        // or is the last block with a bigger size in the ordered list,
        // or is nullptr.
        if (most_suitable == nullptr) {
            free_memory_blocks_[index].InsertNext(freelist_header);
        } else {
            most_suitable->InsertNext(freelist_header);
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
freelist::FreeListHeader *FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::FindMemoryBlock(size_t size)
{
    size_t index = GetIndex(size);
    FreeListHeader *head = GetFirstBlock(index);
    FreeListHeader *suitable_block = nullptr;
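    // Search strategy: first look in the bucket for this size; if nothing there fits,
    // fall through to the first non-empty bucket for larger sizes.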
    if (head != nullptr) {
        // We have some memory in this size range. Try to find a suitable block.
        if (SEGREGATED_LIST_FAST_INSERT) {
            // Blocks are inserted in no particular order,
            // so we need to iterate over the whole list.
            FreeListHeader *current = head;
            while (current != nullptr) {
                if (current->GetSize() < size) {
                    current = current->GetNextFree();
                    continue;
                }

                if (SEGREGATED_LIST_FAST_EXTRACT) {
                    suitable_block = current;
                    break;
                }

                if (suitable_block == nullptr) {
                    suitable_block = current;
                } else {
                    if (suitable_block->GetSize() > current->GetSize()) {
                        suitable_block = current;
                    }
                }

                if (suitable_block->GetSize() == size) {
                    break;
                }
                current = current->GetNextFree();
            }
        } else {
            // All blocks in this list are in descending order.
            // We can check the first one to determine if we have a block
            // with this size or not.
            if (head->GetSize() >= size) {
                if (SEGREGATED_LIST_FAST_EXTRACT) {
                    // Just get the first element
                    suitable_block = head;
                } else {
                    // Try to find the most suitable memory for this size.
                    suitable_block = FindTheMostSuitableBlockInOrderedList(index, size);
                }
            }
        }
    }

    if (suitable_block == nullptr) {
        // We didn't find a block in this list. Try to find one in the lists for bigger sizes.
        index++;
        while (index < SEGREGATED_LIST_SIZE) {
            if (GetFirstBlock(index) != nullptr) {
                if (SEGREGATED_LIST_FAST_INSERT || SEGREGATED_LIST_FAST_EXTRACT) {
                    // Just get the first one
                    suitable_block = GetFirstBlock(index);
                } else {
                    suitable_block = FindTheMostSuitableBlockInOrderedList(index, size);
                }
                break;
            }
            index++;
        }
    }

    if (suitable_block != nullptr) {
        ASSERT(suitable_block->GetSize() >= size);
    }

    return suitable_block;
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::ReleaseFreeMemoryBlocks()
{
    for (size_t index = 0; index < SEGREGATED_LIST_SIZE; index++) {
        FreeListHeader *current = GetFirstBlock(index);
        while (current != nullptr) {
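            // Keep the FreeListHeader itself resident; only whole payload pages
            // strictly inside the block can be returned to the OS.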
            size_t block_size = current->GetSize();
            // Start address from which we can release pages
            uintptr_t start_addr = AlignUp(ToUintPtr(current) + sizeof(FreeListHeader), os::mem::GetPageSize());
            // End address before which we can release pages
            uintptr_t end_addr =
                os::mem::AlignDownToPageSize(ToUintPtr(current) + sizeof(MemoryBlockHeader) + block_size);
            if (start_addr < end_addr) {
                os::mem::ReleasePages(start_addr, end_addr);
            }
            current = current->GetNextFree();
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
freelist::FreeListHeader *
FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::FindTheMostSuitableBlockInOrderedList(size_t index,
                                                                                                    size_t size)
{
    static_assert(!SEGREGATED_LIST_FAST_INSERT);
    FreeListHeader *current = GetFirstBlock(index);
    if (current == nullptr) {
        return nullptr;
    }
    size_t current_size = current->GetSize();
    if (current_size < size) {
        return nullptr;
    }
    while (current_size != size) {
        FreeListHeader *next = current->GetNextFree();
        if (next == nullptr) {
            // The current free list header is the last in the list.
            break;
        }
        size_t next_size = next->GetSize();
        if (next_size < size) {
            // The next free list header is smaller than the requested size,
            // so we don't need to iterate any further.
            break;
        }
        current = next;
        current_size = next_size;
    }
    return current;
}

template <typename AllocConfigT, typename LockConfigT>
size_t FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::GetIndex(size_t size)
{
    ASSERT(size >= FREELIST_ALLOCATOR_MIN_SIZE);
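    // Bucketing example (illustrative numbers, not the real constants): with
    // FREELIST_ALLOCATOR_MIN_SIZE = 64 and SEGREGATED_LIST_FREE_BLOCK_RANGE = 64,
    // a 100-byte block maps to index (100 - 64) / 64 = 0; sizes beyond the last
    // range all share the final bucket.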
    size_t index = (size - FREELIST_ALLOCATOR_MIN_SIZE) / SEGREGATED_LIST_FREE_BLOCK_RANGE;
    return index < SEGREGATED_LIST_SIZE ? index : SEGREGATED_LIST_SIZE - 1;
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::ContainObject(const ObjectHeader *obj)
{
    return AllocatedByFreeListAllocatorUnsafe(const_cast<ObjectHeader *>(obj));
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj) == true);
    void *obj_mem = static_cast<void *>(const_cast<ObjectHeader *>(obj));
    // Get the start address of the pool for the input object via PoolManager
    // to avoid iterating over all pools in the allocator.
    auto mem_pool_header =
        static_cast<MemoryPoolHeader *>(PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(obj_mem));
    ASSERT(PoolManager::GetMmapMemPool()
               ->GetAllocatorInfoForAddr(static_cast<void *>(mem_pool_header))
               .GetAllocatorHeaderAddr()  // CODECHECK-NOLINT(C_RULE_ID_INDENT_CHECK)
           == static_cast<const void *>(this));
    MemoryBlockHeader *current_mem_header = mem_pool_header->GetFirstMemoryHeader();
    while (current_mem_header != nullptr) {
        if (current_mem_header->IsUsed()) {
            if (current_mem_header->GetMemory() == obj_mem) {
                return true;
            }
        }
        current_mem_header = current_mem_header->GetNextUsedHeader();
    }
    return false;
}

#undef LOG_FREELIST_ALLOCATOR

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_MEM_FREELIST_ALLOCATOR_INL_H_