/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_MEM_FREELIST_ALLOCATOR_INL_H
#define PANDA_MEM_FREELIST_ALLOCATOR_INL_H

#include "libpandabase/utils/logger.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/mem/freelist_allocator.h"
#include "runtime/mem/object_helpers.h"

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_FREELIST_ALLOCATOR(level) LOG(level, ALLOC) << "FreeListAllocator: "

template <typename AllocConfigT, typename LockConfigT>
FreeListAllocator<AllocConfigT, LockConfigT>::FreeListAllocator(MemStatsType *mem_stats, SpaceType type_allocation)
    : type_allocation_(type_allocation), mem_stats_(mem_stats)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Initializing FreeListAllocator";
    ASAN_POISON_MEMORY_REGION(&segregated_list_, sizeof(segregated_list_));
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Initializing FreeListAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
FreeListAllocator<AllocConfigT, LockConfigT>::~FreeListAllocator()
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Destroying FreeListAllocator";
    ASAN_UNPOISON_MEMORY_REGION(&segregated_list_, sizeof(segregated_list_));
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Destroying FreeListAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
template <bool need_lock>
void *FreeListAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    os::memory::WriteLockHolder<LockConfigT, need_lock> wlock(alloc_free_lock_);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to allocate object with size " << std::dec << size;
    size_t alloc_size = size;
    if (alloc_size < FREELIST_ALLOCATOR_MIN_SIZE) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Requested size is less than the minimum for this allocator";
        alloc_size = FREELIST_ALLOCATOR_MIN_SIZE;
    }
    size_t def_aligned_size = AlignUp(alloc_size, GetAlignmentInBytes(FREELIST_DEFAULT_ALIGNMENT));
    if (def_aligned_size > alloc_size) {
        alloc_size = def_aligned_size;
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Align size to default alignment. New size = " << alloc_size;
    }
    if (alloc_size > FREELIST_MAX_ALLOC_SIZE) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Requested size is too big for the free list allocator. Return nullptr";
        return nullptr;
    }
    // Get best-fit memory piece from the segregated list.
    MemoryBlockHeader *memory_block = GetFromSegregatedList(alloc_size, align);
    if (memory_block == nullptr) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Couldn't allocate memory";
        return nullptr;
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Found memory block at addr = " << std::hex << memory_block << " with size "
                                  << std::dec << memory_block->GetSize();
    ASSERT(memory_block->GetSize() >= alloc_size);
    uintptr_t memory_pointer = ToUintPtr(memory_block->GetMemory());
    bool required_padding = false;
    if ((memory_pointer & (GetAlignmentInBytes(align) - 1)) != 0U) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Raw memory is not aligned as we need. Create a special header for padding";
        // The raw memory pointer is not aligned as we expected,
        // so we need to create an extra (padding) header inside the block.
        uintptr_t aligned_memory_pointer =
            AlignUp(memory_pointer + sizeof(MemoryBlockHeader), GetAlignmentInBytes(align));
        size_t size_with_padding = alloc_size + (aligned_memory_pointer - memory_pointer);
        ASSERT(memory_block->GetSize() >= size_with_padding);
        auto padding_header =
            static_cast<MemoryBlockHeader *>(ToVoidPtr(aligned_memory_pointer - sizeof(MemoryBlockHeader)));
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Created padding header at addr " << std::hex << padding_header;
        padding_header->Initialize(alloc_size, memory_block);
        padding_header->SetAsPaddingHeader();
        // Update values
        memory_pointer = aligned_memory_pointer;
        alloc_size = size_with_padding;
        required_padding = true;
    }
    if (CanCreateNewBlockFromRemainder(memory_block, alloc_size)) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Created a new memory block from the remainder part:";
        MemoryBlockHeader *new_free_block = SplitMemoryBlocks(memory_block, alloc_size);
        LOG_FREELIST_ALLOCATOR(DEBUG) << "New block started at addr " << std::hex << new_free_block << " with size "
                                      << std::dec << new_free_block->GetSize();
        memory_block->SetUsed();
        FreeListHeader *new_free_list_element = TryToCoalescing(new_free_block);
        ASSERT(!new_free_list_element->IsUsed());
        AddToSegregatedList(new_free_list_element);
    } else {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Can't create a new block from the remainder. Use the full block.";
        memory_block->SetUsed();
    }
    if (required_padding) {
        // We must update some values in the current memory_block
        uintptr_t padding_size = memory_pointer - ToUintPtr(memory_block->GetMemory());
        if (padding_size == sizeof(MemoryBlockHeader)) {
            LOG_FREELIST_ALLOCATOR(DEBUG) << "SetHasPaddingHeaderAfter";
            memory_block->SetPaddingHeaderStoredAfterHeader();
        } else {
            LOG_FREELIST_ALLOCATOR(DEBUG) << "SetHasPaddingSizeAfter, size = " << padding_size;
            memory_block->SetPaddingSizeStoredAfterHeader();
            memory_block->SetPaddingSize(padding_size);
        }
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Allocated memory at addr " << std::hex << ToVoidPtr(memory_pointer);
    {
        AllocConfigT::OnAlloc(memory_block->GetSize(), type_allocation_, mem_stats_);
        // It is not the object size itself, because we can't compute it from the MemoryBlockHeader structure
        // at the Free call. It is an approximation.
        size_t current_size =
            ToUintPtr(memory_block) + memory_block->GetSize() + sizeof(MemoryBlockHeader) - memory_pointer;
        AllocConfigT::AddToCrossingMap(ToVoidPtr(memory_pointer), current_size);
    }
    ASAN_UNPOISON_MEMORY_REGION(ToVoidPtr(memory_pointer), size);
    AllocConfigT::MemoryInit(ToVoidPtr(memory_pointer), size);
    return ToVoidPtr(memory_pointer);
}
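
// Layout sketch for an aligned allocation that needs padding (illustrative only,
// derived from the logic in Alloc() above; field positions are schematic):
//
//   |MemoryBlockHeader|...padding...|padding header|<--- aligned user memory --->|
//   ^memory_block                   ^aligned_memory_pointer - sizeof(MemoryBlockHeader)
//
// GetFreeListMemoryHeader() later walks one header back from the user pointer;
// if it lands on a padding header, it follows the stored link back to the real
// block header.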

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::Free(void *mem)
{
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    FreeUnsafe(mem);
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::FreeUnsafe(void *mem)
{
    if (UNLIKELY(mem == nullptr)) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to free memory at invalid addr 0";
        return;
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to free memory at addr " << std::hex << mem;
#ifndef NDEBUG
    if (!AllocatedByFreeListAllocatorUnsafe(mem)) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to free memory not allocated by this allocator";
        return;
    }
#endif  // !NDEBUG

    MemoryBlockHeader *memory_header = GetFreeListMemoryHeader(mem);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "It is a memory with header " << std::hex << memory_header << " with size "
                                  << std::dec << memory_header->GetSize() << " (probably with padding)";
    {
        AllocConfigT::OnFree(memory_header->GetSize(), type_allocation_, mem_stats_);
        // It is not the object size itself, because we can't compute it from the MemoryBlockHeader structure.
        // It is an approximation.
        size_t current_size =
            ToUintPtr(memory_header) + memory_header->GetSize() + sizeof(MemoryBlockHeader) - ToUintPtr(mem);
        MemoryBlockHeader *prev_used_header = memory_header->GetPrevUsedHeader();
        void *prev_object = nullptr;
        size_t prev_size = 0;
        if (prev_used_header != nullptr) {
            prev_object = prev_used_header->GetMemory();
            prev_size = ToUintPtr(prev_used_header) + prev_used_header->GetSize() + sizeof(MemoryBlockHeader) -
                        ToUintPtr(prev_used_header->GetMemory());
        }
        MemoryBlockHeader *next_used_header = memory_header->GetNextUsedHeader();
        void *next_object = nullptr;
        if (next_used_header != nullptr) {
            next_object = next_used_header->GetMemory();
        }
        AllocConfigT::RemoveFromCrossingMap(mem, current_size, next_object, prev_object, prev_size);
    }
    memory_header->SetUnused();
    memory_header->SetCommonHeader();
    FreeListHeader *new_free_list_element = TryToCoalescing(memory_header);
    ASAN_POISON_MEMORY_REGION(new_free_list_element, new_free_list_element->GetSize() + sizeof(MemoryBlockHeader));
    AddToSegregatedList(new_free_list_element);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Freed memory at addr " << std::hex << mem;
}

// TODO(aemelenko): We can create a mutex for each pool to increase performance,
// but it requires a pool alignment restriction
// (we must compute the memory pool header addr from a memory block addr stored inside it)

// During a Collect call we iterate over memory blocks in each pool.
// This iteration can cause race conditions in multithreaded mode.
// E.g., in this scenario:
// |-------|---------|-------------------|------------------------------------------------------------------|
// | time: | Thread: |    Description:   |                         Memory footprint:                        |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread0 starts    |  |..............Free  Block.............|...Allocated block...|  |
// |       |         | iterating         |  |                                                               |
// |   0   |    0    | over mem blocks   |  current block pointer                                           |
// |       |         | and current block |                                                                  |
// |       |         | is a free block   |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread1           |  |...Allocated block...|................|...Allocated block...|  |
// |   1   |    1    | allocates memory  |                        |                                         |
// |       |         | at this block     |               Unused memory piece                                |
// |       |         |                   |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread1           |  |...Allocated block...|................|...Allocated block...|  |
// |   2   |    1    | sets up values in |  |                                                               |
// |       |         | this block header |  change size of this block                                       |
// |       |         | (sets up size)    |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
// |       |         | Thread0 reads     |  |...Allocated block...|................|...Allocated block...|  |
// |       |         | some garbage or   |                                                                  |
// |   3   |    0    | a wrong value to  |  current block pointer - points to wrong memory                  |
// |       |         | calculate next    |                                                                  |
// |       |         | block pointer     |                                                                  |
// |-------|---------|-------------------|------------------------------------------------------------------|
//
// Therefore, we must unlock the allocator's alloc/free methods only
// when we have a pointer to an allocated block (i.e., IsUsed())

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::Collect(const GCObjectVisitor &death_checker_fn)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Collecting started";
    // TODO(aemelenko): Looks like we can unlock alloc_free_lock_ for objects that are not dead during collection
    IterateOverObjects([&](ObjectHeader *mem) {
        if (death_checker_fn(mem) == ObjectStatus::DEAD_OBJECT) {
            LOG(DEBUG, GC) << "DELETE OBJECT " << GetDebugInfoAboutObject(mem);
            FreeUnsafe(mem);
        }
    });
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Collecting finished";
}
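
// Usage sketch (illustrative): a hypothetical death checker for Collect().
// IsMarked() is a placeholder for the GC's liveness query, and ALIVE_OBJECT is the
// assumed counterpart of the ObjectStatus::DEAD_OBJECT value used above:
//
//   allocator.Collect([](ObjectHeader *obj) {
//       return IsMarked(obj) ? ObjectStatus::ALIVE_OBJECT : ObjectStatus::DEAD_OBJECT;
//   });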

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::IterateOverObjects(const ObjectVisitor &object_visitor)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Iterating over objects started";
    MemoryPoolHeader *current_pool = nullptr;
    {
        // Do this under lock because the pointer stored in mempool_tail_ can be changed by other threads
        // in an AddMemoryPool call.
        // NOTE: We add each new pool at mempool_tail_. Therefore, we can read it once and iterate towards the head.
        os::memory::ReadLockHolder rlock(alloc_free_lock_);
        current_pool = mempool_tail_;
    }
    while (current_pool != nullptr) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "  iterate over " << std::hex << current_pool;
        MemoryBlockHeader *current_mem_header = current_pool->GetFirstMemoryHeader();
        while (current_mem_header != nullptr) {
            // Lock against any possible memory header changes in the allocator.
            os::memory::WriteLockHolder wlock(alloc_free_lock_);
            if (current_mem_header->IsUsed()) {
                // Free can be called inside object_visitor, which is why we lock everything.
                object_visitor(static_cast<ObjectHeader *>(current_mem_header->GetMemory()));
                // At this point, current_mem_header can point to a nonexistent memory block
                // (e.g., if we coalesce it with the previous free block).
                // However, we still have a valid MemoryBlockHeader class field here.
                // NOTE: We first coalesce the current_mem_header block with the next one (if it is free)
                // and update the current block size. Therefore, we have a valid pointer to the next memory block.

                // After calling object_visitor(), current_mem_header may point to a free block of memory,
                // which can be modified by an Alloc call in another thread.
                // Therefore, do not unlock until we have a pointer to a Used block.
                current_mem_header = current_mem_header->GetNextUsedHeader();
            } else {
                // The current header is marked as Unused, so it can be modified by an allocation in another thread.
                // Therefore, read the next header while holding the lock.
                current_mem_header = current_mem_header->GetNextUsedHeader();
            }
            // We now have a pointer to a Used memory block, or nullptr.
            // Therefore, we can unlock.
        }
        current_pool = current_pool->GetPrev();
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Iterating over objects finished";
}
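
// Usage sketch (illustrative): visit every live object managed by this allocator.
// The visitor runs under the allocator's write lock, so it may release an object via
// FreeUnsafe() (as Collect() does above), but it must not call the locking Free():
//
//   allocator.IterateOverObjects([](ObjectHeader *obj) { /* inspect obj */ });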

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::AddMemoryPool(void *mem, size_t size)
{
    // Lock alloc/free because we add a new block to the segregated list here.
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    ASSERT(mem != nullptr);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Add memory pool to FreeListAllocator from " << std::hex << mem << " with size "
                                  << std::dec << size;
    ASSERT((ToUintPtr(mem) & (sizeof(MemoryBlockHeader) - 1)) == 0U);
    auto mempool_header = static_cast<MemoryPoolHeader *>(mem);
    if (mempool_head_ == nullptr) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Initialize mempool_head_";
        mempool_header->Initialize(size, nullptr, nullptr);
        mempool_head_ = mempool_header;
        mempool_tail_ = mempool_header;
    } else {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Add this memory pool at the tail after block " << std::hex << mempool_tail_;
        mempool_header->Initialize(size, mempool_tail_, nullptr);
        mempool_tail_->SetNext(mempool_header);
        mempool_tail_ = mempool_header;
    }
    MemoryBlockHeader *first_mem_header = mempool_header->GetFirstMemoryHeader();
    first_mem_header->Initialize(size - sizeof(MemoryBlockHeader) - sizeof(MemoryPoolHeader), nullptr);
    first_mem_header->SetLastBlockInPool();
    AddToSegregatedList(static_cast<FreeListHeader *>(first_mem_header));
    ASAN_POISON_MEMORY_REGION(mem, size);
    AllocConfigT::InitializeCrossingMapForMemory(mem, size);
    return true;
}
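
// Pool layout sketch right after AddMemoryPool() (illustrative; field positions are schematic):
//
//   |MemoryPoolHeader|MemoryBlockHeader|................. free payload .................|
//   ^mem             ^first_mem_header  payload size = size - sizeof(MemoryPoolHeader)
//                                                           - sizeof(MemoryBlockHeader)
//
// The single free block is marked as the last block in the pool and pushed into the
// segregated list, ready to be split by subsequent Alloc() calls.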

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveAllPools(const MemVisitor &mem_visitor)
{
    // We call this method to return pools to the system.
    // Therefore, delete all objects to clear all external dependencies.
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Clear all objects inside the allocator";
    // Lock everything to avoid a race condition.
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    MemoryPoolHeader *current_pool = mempool_head_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects.
        MemoryPoolHeader *tmp = current_pool->GetNext();
        AllocConfigT::RemoveCrossingMapForMemory(current_pool, current_pool->GetSize());
        mem_visitor(current_pool, current_pool->GetSize());
        current_pool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveFreePools(const MemVisitor &mem_visitor)
{
    // Lock everything to avoid a race condition.
    os::memory::WriteLockHolder wlock(alloc_free_lock_);
    MemoryPoolHeader *current_pool = mempool_head_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects.
        MemoryPoolHeader *tmp = current_pool->GetNext();
        MemoryBlockHeader *first_block = current_pool->GetFirstMemoryHeader();
        if (first_block->IsLastBlockInPool() && !first_block->IsUsed()) {
            // We have only one big memory block in this pool,
            // and this block is not used.
            LOG_FREELIST_ALLOCATOR(DEBUG)
                << "VisitAndRemoveFreePools: Remove free memory pool from allocator with start addr " << std::hex
                << current_pool << " and size " << std::dec << current_pool->GetSize()
                << " bytes with the first block at addr " << std::hex << first_block << " and size " << std::dec
                << first_block->GetSize();
            auto free_header = static_cast<FreeListHeader *>(first_block);
            free_header->PopFromFreeList();
            MemoryPoolHeader *next = current_pool->GetNext();
            MemoryPoolHeader *prev = current_pool->GetPrev();
            if (next != nullptr) {
                ASSERT(next->GetPrev() == current_pool);
                next->SetPrev(prev);
            } else {
                // This means that the current pool is the last one.
                ASSERT(mempool_tail_ == current_pool);
                LOG_FREELIST_ALLOCATOR(DEBUG) << "VisitAndRemoveFreePools: Change pools tail pointer";
                mempool_tail_ = prev;
            }
            if (prev != nullptr) {
                ASSERT(prev->GetNext() == current_pool);
                prev->SetNext(next);
            } else {
                // This means that the current pool is the first one.
                ASSERT(mempool_head_ == current_pool);
                LOG_FREELIST_ALLOCATOR(DEBUG) << "VisitAndRemoveFreePools: Change pools head pointer";
                mempool_head_ = next;
            }
            AllocConfigT::RemoveCrossingMapForMemory(current_pool, current_pool->GetSize());
            mem_visitor(current_pool, current_pool->GetSize());
        }
        current_pool = tmp;
    }
    segregated_list_.ReleaseFreeMemoryBlocks();
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void FreeListAllocator<AllocConfigT, LockConfigT>::IterateOverObjectsInRange(const MemVisitor &mem_visitor,
                                                                             void *left_border, void *right_border)
{
    // NOTE: The current implementation doesn't look at the PANDA_CROSSING_MAP_MANAGE_CROSSED_BORDER flag
    LOG_FREELIST_ALLOCATOR(DEBUG) << "FreeListAllocator::IterateOverObjectsInRange for range [" << std::hex
                                  << left_border << ", " << right_border << "]";
    ASSERT(ToUintPtr(right_border) >= ToUintPtr(left_border));
    // TODO(aemelenko): These are temporary asserts because we can't do anything
    // if the range crosses different allocators' memory pools
    ASSERT(ToUintPtr(right_border) - ToUintPtr(left_border) ==
           (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(right_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(left_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));
    MemoryBlockHeader *first_memory_header = nullptr;
    {
        // Do this under lock because the pointer to the first object in the CrossingMap can be changed during
        // a CrossingMap call.
        os::memory::ReadLockHolder rlock(alloc_free_lock_);
        if (!AllocatedByFreeListAllocatorUnsafe(left_border) && !AllocatedByFreeListAllocatorUnsafe(right_border)) {
            LOG_FREELIST_ALLOCATOR(DEBUG) << "This memory range is not covered by this allocator";
            return;
        }
        void *obj_addr = AllocConfigT::FindFirstObjInCrossingMap(left_border, right_border);
        if (obj_addr == nullptr) {
            return;
        }
        ASSERT(AllocatedByFreeListAllocatorUnsafe(obj_addr));
        MemoryBlockHeader *memory_header = GetFreeListMemoryHeader(obj_addr);
        // memory_header points to an object which starts in this range or in the previous one.
        // In the second case, this object may not cross the border of the current range
        // (but there is an object stored after it which crosses the current range).
        ASSERT(ToUintPtr(memory_header->GetMemory()) <= ToUintPtr(right_border));
        first_memory_header = memory_header;
    }
    ASSERT(first_memory_header != nullptr);
    // Let's start the iteration:
    MemoryBlockHeader *current_mem_header = first_memory_header;
    LOG_FREELIST_ALLOCATOR(DEBUG) << "FreeListAllocator::IterateOverObjectsInRange start iterating from obj with addr "
                                  << std::hex << first_memory_header->GetMemory();
    while (current_mem_header != nullptr) {
        // We don't lock the allocator because we point to a used block which can't be changed
        // during the iteration in range.
        void *obj_addr = current_mem_header->GetMemory();
        if (ToUintPtr(obj_addr) > ToUintPtr(right_border)) {
            // The iteration is over
            break;
        }
        LOG_FREELIST_ALLOCATOR(DEBUG)
            << "FreeListAllocator::IterateOverObjectsInRange found obj in this range with addr " << std::hex
            << obj_addr;
        mem_visitor(static_cast<ObjectHeader *>(obj_addr));
        {
            os::memory::ReadLockHolder rlock(alloc_free_lock_);
            current_mem_header = current_mem_header->GetNextUsedHeader();
        }
    }
    LOG_FREELIST_ALLOCATOR(DEBUG) << "FreeListAllocator::IterateOverObjectsInRange finished";
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::CoalesceMemoryBlocks(MemoryBlockHeader *first_block,
                                                                        MemoryBlockHeader *second_block)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "CoalesceMemoryBlocks: "
                                  << "first block = " << std::hex << first_block << " with size " << std::dec
                                  << first_block->GetSize() << " ; second block = " << std::hex << second_block
                                  << " with size " << std::dec << second_block->GetSize();
    ASSERT(first_block != nullptr);
    ASSERT(first_block->GetNextHeader() == second_block);
    ASSERT(first_block->CanBeCoalescedWithNext() || second_block->CanBeCoalescedWithPrev());
    first_block->Initialize(first_block->GetSize() + second_block->GetSize() + sizeof(MemoryBlockHeader),
                            first_block->GetPrevHeader());
    if (second_block->IsLastBlockInPool()) {
        LOG_FREELIST_ALLOCATOR(DEBUG) << "CoalesceMemoryBlocks: second_block was the last in a pool";
        first_block->SetLastBlockInPool();
    } else {
        ASSERT(first_block->GetNextHeader() != nullptr);
        first_block->GetNextHeader()->SetPrevHeader(first_block);
    }
}
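
// Coalescing sketch (illustrative): two adjacent blocks become one, and the second
// block's header is absorbed into the payload of the first:
//
//   |hdr A|payload A|hdr B|payload B|  ->  |hdr A|payload A + sizeof(hdr) + payload B|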

template <typename AllocConfigT, typename LockConfigT>
freelist::MemoryBlockHeader *FreeListAllocator<AllocConfigT, LockConfigT>::SplitMemoryBlocks(
    MemoryBlockHeader *memory_block, size_t first_block_size)
{
    ASSERT(memory_block->GetSize() > (first_block_size + sizeof(MemoryBlockHeader)));
    ASSERT(!memory_block->IsUsed());
    auto second_block =
        static_cast<MemoryBlockHeader *>(ToVoidPtr(ToUintPtr(memory_block->GetMemory()) + first_block_size));
    size_t second_block_size = memory_block->GetSize() - first_block_size - sizeof(MemoryBlockHeader);
    second_block->Initialize(second_block_size, memory_block);
    if (memory_block->IsLastBlockInPool()) {
        second_block->SetLastBlockInPool();
    } else {
        ASSERT(second_block->GetNextHeader() != nullptr);
        second_block->GetNextHeader()->SetPrevHeader(second_block);
    }
    memory_block->Initialize(first_block_size, memory_block->GetPrevHeader());
    return second_block;
}
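
// Splitting sketch (illustrative): the inverse of coalescing. A free block of size S
// is cut at first_block_size and a new header is written for the remainder:
//
//   |hdr|<-------------- S -------------->|
//   |hdr|first_block_size|hdr|S - first_block_size - sizeof(hdr)|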

template <typename AllocConfigT, typename LockConfigT>
freelist::MemoryBlockHeader *FreeListAllocator<AllocConfigT, LockConfigT>::GetFreeListMemoryHeader(void *mem)
{
    ASSERT(mem != nullptr);
    auto memory_header = static_cast<MemoryBlockHeader *>(ToVoidPtr(ToUintPtr(mem) - sizeof(MemoryBlockHeader)));
    if (!memory_header->IsPaddingHeader()) {
        // We got the correct header of this memory, just return it
        return memory_header;
    }
    // This is aligned memory with some free space before the memory pointer.
    // The previous header must be a pointer to the correct header of this memory block.
    LOG_FREELIST_ALLOCATOR(DEBUG) << "It is a memory with padding at the head";
    return memory_header->GetPrevHeader();
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::AllocatedByFreeListAllocator(void *mem)
{
    os::memory::ReadLockHolder rlock(alloc_free_lock_);
    return AllocatedByFreeListAllocatorUnsafe(mem);
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::AllocatedByFreeListAllocatorUnsafe(void *mem)
{
    // TODO(aemelenko): Create a more complex solution
    MemoryPoolHeader *current_pool = mempool_head_;
    while (current_pool != nullptr) {
        // This assert fires if we were asked about an address inside a MemoryPoolHeader
        ASSERT(!((ToUintPtr(current_pool->GetFirstMemoryHeader()) > ToUintPtr(mem)) &&
                 (ToUintPtr(current_pool) < ToUintPtr(mem))));
        if ((ToUintPtr(current_pool->GetFirstMemoryHeader()) < ToUintPtr(mem)) &&
            ((ToUintPtr(current_pool) + current_pool->GetSize()) > ToUintPtr(mem))) {
            return true;
        }
        current_pool = current_pool->GetNext();
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
freelist::FreeListHeader *FreeListAllocator<AllocConfigT, LockConfigT>::TryToCoalescing(
    MemoryBlockHeader *memory_header)
{
    ASSERT(memory_header != nullptr);
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Try to coalesce memory block";
    if (memory_header->CanBeCoalescedWithNext()) {
        ASSERT(!memory_header->GetNextHeader()->IsUsed());
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Coalesce with the next block";
        auto next_free_list = static_cast<FreeListHeader *>(memory_header->GetNextHeader());
        ASSERT(next_free_list != nullptr);
        // Pop this free-list element from the list
        next_free_list->PopFromFreeList();
        // Combine these two blocks together
        CoalesceMemoryBlocks(memory_header, static_cast<MemoryBlockHeader *>(next_free_list));
    }
    if (memory_header->CanBeCoalescedWithPrev()) {
        ASSERT(!memory_header->GetPrevHeader()->IsUsed());
        LOG_FREELIST_ALLOCATOR(DEBUG) << "Coalesce with the prev block";
        auto prev_free_list = static_cast<FreeListHeader *>(memory_header->GetPrevHeader());
        // Pop this free-list element from the list
        prev_free_list->PopFromFreeList();
        // Combine these two blocks together
        CoalesceMemoryBlocks(static_cast<MemoryBlockHeader *>(prev_free_list), memory_header);
        memory_header = static_cast<MemoryBlockHeader *>(prev_free_list);
    }
    return static_cast<FreeListHeader *>(memory_header);
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::AddToSegregatedList(FreeListHeader *free_list_element)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "AddToSegregatedList: Add new block into segregated-list with size "
                                  << free_list_element->GetSize();
    segregated_list_.AddMemoryBlock(free_list_element);
}

template <typename AllocConfigT, typename LockConfigT>
freelist::MemoryBlockHeader *FreeListAllocator<AllocConfigT, LockConfigT>::GetFromSegregatedList(size_t size,
                                                                                                 Alignment align)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "GetFromSegregatedList: Try to find memory for size " << size << " with alignment "
                                  << align;
    size_t aligned_size = size;
    if (align != FREELIST_DEFAULT_ALIGNMENT) {
        // TODO(aemelenko): This is a crude but fast solution with higher fragmentation.
        // It would be better to add this value instead, but I'm not 100% sure about all corner cases:
        // (GetAlignmentInBytes(align) + sizeof(MemoryBlockHeader) - GetAlignmentInBytes(FREELIST_DEFAULT_ALIGNMENT))
        aligned_size += (GetAlignmentInBytes(align) + sizeof(MemoryBlockHeader));
    }
    FreeListHeader *mem_block = segregated_list_.FindMemoryBlock(aligned_size);
    if (mem_block != nullptr) {
        mem_block->PopFromFreeList();
        ASSERT((AlignUp(ToUintPtr(mem_block->GetMemory()), GetAlignmentInBytes(align)) -
                ToUintPtr(mem_block->GetMemory()) + size) <= mem_block->GetSize());
    }
    return static_cast<MemoryBlockHeader *>(mem_block);
}

template <typename AllocConfigT, typename LockConfigT>
ATTRIBUTE_NO_SANITIZE_ADDRESS void FreeListAllocator<AllocConfigT, LockConfigT>::MemoryPoolHeader::Initialize(
    size_t size, MemoryPoolHeader *prev, MemoryPoolHeader *next)
{
    LOG_FREELIST_ALLOCATOR(DEBUG) << "Init a new memory pool with size " << size << " with prev link = " << std::hex
                                  << prev << " and next link = " << next;
    ASAN_UNPOISON_MEMORY_REGION(this, sizeof(MemoryPoolHeader));
    size_ = size;
    prev_ = prev;
    next_ = next;
    ASAN_POISON_MEMORY_REGION(this, sizeof(MemoryPoolHeader));
}

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::AddMemoryBlock(FreeListHeader *freelist_header)
{
    size_t size = freelist_header->GetSize();
    size_t index = GetIndex(size);
    if (SEGREGATED_LIST_FAST_INSERT) {
        free_memory_blocks_[index].InsertNext(freelist_header);
    } else {
        FreeListHeader *most_suitable = FindTheMostSuitableBlockInOrderedList(index, size);
        // The most suitable block either has exactly this size,
        // or is the last block with a bigger size in the ordered list,
        // or is nullptr.
        if (most_suitable == nullptr) {
            free_memory_blocks_[index].InsertNext(freelist_header);
        } else {
            most_suitable->InsertNext(freelist_header);
        }
    }
}
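
// Ordering example (illustrative, for the !SEGREGATED_LIST_FAST_INSERT case): if the
// list at this index holds blocks of sizes [64, 48, 32] and a 48-byte block is added,
// FindTheMostSuitableBlockInOrderedList() returns the existing 48-byte block and
// InsertNext() places the new block right after it, giving [64, 48, 48, 32] and
// preserving the descending order that FindMemoryBlock() relies on.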

template <typename AllocConfigT, typename LockConfigT>
freelist::FreeListHeader *FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::FindMemoryBlock(size_t size)
{
    size_t index = GetIndex(size);
    FreeListHeader *head = GetFirstBlock(index);
    FreeListHeader *suitable_block = nullptr;
    if (head != nullptr) {
        // We have some memory in this range. Try to find a suitable block.
        if (SEGREGATED_LIST_FAST_INSERT) {
            // Blocks are inserted in no particular order,
            // so we need to iterate over the whole list.
            FreeListHeader *current = head;
            while (current != nullptr) {
                if (current->GetSize() >= size) {
                    if (SEGREGATED_LIST_FAST_EXTRACT) {
                        suitable_block = current;
                        break;
                    } else {  // NOLINT(readability-else-after-return)
                        if (suitable_block == nullptr) {
                            suitable_block = current;
                        } else {
                            if (suitable_block->GetSize() > current->GetSize()) {
                                suitable_block = current;
                            }
                        }
                        if (suitable_block->GetSize() == size) {
                            break;
                        }
                    }
                }
                current = current->GetNextFree();
            }
        } else {
            // All blocks in this list are in descending order.
            // We can check the first one to determine if we have a block
            // with this size or not.
            if (head->GetSize() >= size) {
                if (SEGREGATED_LIST_FAST_EXTRACT) {
                    // Just take the first element
                    suitable_block = head;
                } else {
                    // Try to find the most suitable memory for this size.
                    suitable_block = FindTheMostSuitableBlockInOrderedList(index, size);
                }
            }
        }
    }

    if (suitable_block == nullptr) {
        // We didn't find a block in the head list. Try to find one in the other lists.
        index++;
        while (index < SEGREGATED_LIST_SIZE) {
            if (GetFirstBlock(index) != nullptr) {
                if (SEGREGATED_LIST_FAST_INSERT || SEGREGATED_LIST_FAST_EXTRACT) {
                    // Just take the first one
                    suitable_block = GetFirstBlock(index);
                } else {
                    suitable_block = FindTheMostSuitableBlockInOrderedList(index, size);
                }
                break;
            }
            index++;
        }
    }

    if (suitable_block != nullptr) {
        ASSERT(suitable_block->GetSize() >= size);
    }

    return suitable_block;
}
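
// Note on the configuration flags used above: with SEGREGATED_LIST_FAST_INSERT the
// per-index lists are unordered, so the whole list must be scanned; otherwise the
// lists are kept in descending size order and checking the head is enough to know
// whether a fitting block exists. SEGREGATED_LIST_FAST_EXTRACT trades best-fit
// accuracy for speed by taking the first fitting block instead of the smallest one.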

template <typename AllocConfigT, typename LockConfigT>
void FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::ReleaseFreeMemoryBlocks()
{
    for (size_t index = 0; index < SEGREGATED_LIST_SIZE; index++) {
        FreeListHeader *current = GetFirstBlock(index);
        while (current != nullptr) {
            size_t block_size = current->GetSize();
            // Start address from which we can release pages
            uintptr_t start_addr = AlignUp(ToUintPtr(current) + sizeof(FreeListHeader), os::mem::GetPageSize());
            // End address before which we can release pages
            uintptr_t end_addr =
                os::mem::AlignDownToPageSize(ToUintPtr(current) + sizeof(MemoryBlockHeader) + block_size);
            if (start_addr < end_addr) {
                os::mem::ReleasePages(start_addr, end_addr);
            }
            current = current->GetNextFree();
        }
    }
}
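
// Note: the release range starts after sizeof(FreeListHeader) so the header (and the
// free-list links it holds) stays resident; only whole pages strictly inside the free
// payload are returned to the OS. For example, with a hypothetical 4 KiB page size, a
// free block occupying [0x1010, 0x5010) would release the pages in [0x2000, 0x5000).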

template <typename AllocConfigT, typename LockConfigT>
freelist::FreeListHeader *
FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::FindTheMostSuitableBlockInOrderedList(size_t index,
                                                                                                    size_t size)
{
    static_assert(!SEGREGATED_LIST_FAST_INSERT);
    FreeListHeader *current = GetFirstBlock(index);
    if (current == nullptr) {
        return nullptr;
    }
    size_t current_size = current->GetSize();
    if (current_size < size) {
        return nullptr;
    }
    while (current_size != size) {
        FreeListHeader *next = current->GetNextFree();
        if (next == nullptr) {
            // The current free-list header is the last in the list.
            break;
        }
        size_t next_size = next->GetSize();
        if (next_size < size) {
            // The next free-list header is smaller than size,
            // so we don't need to iterate any further.
            break;
        }
        current = next;
        current_size = next_size;
    }
    return current;
}

template <typename AllocConfigT, typename LockConfigT>
size_t FreeListAllocator<AllocConfigT, LockConfigT>::SegregatedList::GetIndex(size_t size)
{
    ASSERT(size >= FREELIST_ALLOCATOR_MIN_SIZE);
    size_t index = (size - FREELIST_ALLOCATOR_MIN_SIZE) / SEGREGATED_LIST_FREE_BLOCK_RANGE;
    return index < SEGREGATED_LIST_SIZE ? index : SEGREGATED_LIST_SIZE - 1;
}
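
// Worked example (with hypothetical constants FREELIST_ALLOCATOR_MIN_SIZE = 16 and
// SEGREGATED_LIST_FREE_BLOCK_RANGE = 16): a 40-byte block maps to index
// (40 - 16) / 16 = 1, and any size beyond the covered ranges falls into the last
// bucket, SEGREGATED_LIST_SIZE - 1.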

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::ContainObject(const ObjectHeader *obj)
{
    return AllocatedByFreeListAllocatorUnsafe(const_cast<ObjectHeader *>(obj));
}

template <typename AllocConfigT, typename LockConfigT>
bool FreeListAllocator<AllocConfigT, LockConfigT>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj));
    void *obj_mem = static_cast<void *>(const_cast<ObjectHeader *>(obj));
    // Get the start address of the pool for the input object via PoolManager
    // to avoid iterating over all pools in the allocator.
    auto mem_pool_header =
        static_cast<MemoryPoolHeader *>(PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(obj_mem));
    [[maybe_unused]] auto alloc_info =
        PoolManager::GetMmapMemPool()->GetAllocatorInfoForAddr(static_cast<void *>(mem_pool_header));
    ASSERT(alloc_info.GetAllocatorHeaderAddr() == static_cast<const void *>(this));
    MemoryBlockHeader *current_mem_header = mem_pool_header->GetFirstMemoryHeader();
    while (current_mem_header != nullptr) {
        if (current_mem_header->IsUsed()) {
            if (current_mem_header->GetMemory() == obj_mem) {
                return true;
            }
        }
        current_mem_header = current_mem_header->GetNextUsedHeader();
    }
    return false;
}

#undef LOG_FREELIST_ALLOCATOR

}  // namespace panda::mem

#endif  // PANDA_MEM_FREELIST_ALLOCATOR_INL_H