/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H
#define PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H

#include <securec.h>
#include "libpandabase/utils/asan_interface.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/runslots_allocator.h"

namespace ark::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_RUNSLOTS_ALLOCATOR(level) LOG(level, ALLOC) << "RunSlotsAllocator: "

template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsAllocator(MemStatsType *memStats, SpaceType typeAllocation)
    : typeAllocation_(typeAllocation), memStats_(memStats)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator";
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::~RunSlotsAllocator()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator";
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator finished";
}

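/**
 * Allocate size bytes of memory with the given alignment.
 * The requested size is rounded up to the next power of two and served from the
 * runslots_ list of the matching size bucket. If that list is empty, a RunSlots is
 * reused from freeRunslots_ (unless DISABLE_USE_FREE_RUNSLOTS is set) or carved out
 * of pool memory. Returns nullptr if size is 0 or exceeds RunSlotsType::MaxSlotSize().
 */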
template <typename AllocConfigT, typename LockConfigT>
template <bool NEED_LOCK, bool DISABLE_USE_FREE_RUNSLOTS>
inline void *RunSlotsAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    using ListLock = typename LockConfigT::ListLock;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to allocate " << size << " bytes of memory with align " << align;
    if (size == 0) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is null";
        return nullptr;
    }
    // NOTE(aemelenko): Do smth more memory flexible with alignment
    size_t alignmentSize = GetAlignmentInBytes(align);
    if (alignmentSize > size) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Change size of allocation to " << alignmentSize
                                      << " bytes because of alignment";
        size = alignmentSize;
    }
    if (size > RunSlotsType::MaxSlotSize()) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is too big";
        return nullptr;
    }
    size_t slotSizePowerOfTwo = RunSlotsType::ConvertToPowerOfTwoUnsafe(size);
    size_t arrayIndex = slotSizePowerOfTwo;
    const size_t runSlotSize = 1UL << slotSizePowerOfTwo;
    RunSlotsType *runslots = nullptr;
    bool usedFromFreedRunslotsList = false;
    {
        os::memory::LockHolder<ListLock, NEED_LOCK> listLock(*runslots_[arrayIndex].GetLock());
        runslots = runslots_[arrayIndex].PopFromHead();
    }
    if (runslots == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "We don't have free RunSlots for size " << runSlotSize
                                      << ". Try to get new one.";
        if (DISABLE_USE_FREE_RUNSLOTS) {
            return nullptr;
        }
        {
            os::memory::LockHolder<ListLock, NEED_LOCK> listLock(*freeRunslots_.GetLock());
            runslots = freeRunslots_.PopFromHead();
        }
        if (runslots != nullptr) {
            usedFromFreedRunslotsList = true;
            LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Get RunSlots from free list";
        } else {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "Failed to get new RunSlots from free list, try to allocate one from memory";
            runslots = CreateNewRunSlotsFromMemory(runSlotSize);
            if (runslots == nullptr) {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate an object, couldn't create RunSlots";
                return nullptr;
            }
        }
    }
    void *allocatedMem = nullptr;
    {
        os::memory::LockHolder<typename LockConfigT::RunSlotsLock, NEED_LOCK> runslotsLock(*runslots->GetLock());
        if (usedFromFreedRunslotsList) {
            // NOTE(aemelenko): if we allocate and free two different size objects,
            //                  we will have a perf issue here. Maybe it is better to delete free_runslots_?
            if (runslots->GetSlotsSize() != runSlotSize) {
                runslots->Initialize(runSlotSize, runslots->GetPoolPointer(), false);
            }
        }
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Used runslots with addr " << std::hex << runslots;
        allocatedMem = static_cast<void *>(runslots->PopFreeSlot());
        if (allocatedMem == nullptr) {
            UNREACHABLE();
        }
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Allocate a memory at address " << std::hex << allocatedMem;
        if (!runslots->IsFull()) {
            os::memory::LockHolder<ListLock, NEED_LOCK> listLock(*runslots_[arrayIndex].GetLock());
            // We didn't take the last free slot from this RunSlots
            runslots_[arrayIndex].PushToTail(runslots);
        }
        ASAN_UNPOISON_MEMORY_REGION(allocatedMem, size);
        AllocConfigT::OnAlloc(runSlotSize, typeAllocation_, memStats_);
        AllocConfigT::MemoryInit(allocatedMem);
    }
    return allocatedMem;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Free(void *mem)
{
    FreeUnsafe<true>(mem);
}

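// Drain the freeRunslots_ list and hand every fully free RunSlots back to its pool,
// releasing the backing pages where possible.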
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::ReleaseEmptyRunSlotsPagesUnsafe()
{
    // Iterate over free_runslots list:
    RunSlotsType *curFreeRunslots = nullptr;
    {
        os::memory::LockHolder listLock(*freeRunslots_.GetLock());
        curFreeRunslots = freeRunslots_.PopFromHead();
    }
    while (curFreeRunslots != nullptr) {
        memoryPool_.ReturnAndReleaseRunSlotsMemory(curFreeRunslots);

        {
            os::memory::LockHolder listLock(*freeRunslots_.GetLock());
            curFreeRunslots = freeRunslots_.PopFromHead();
        }
    }
}

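// Return one slot to its RunSlots without taking the RunSlots lock.
// Returns true if the RunSlots became empty and the caller should move it
// to the freeRunslots_ list.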
template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafeInternal(RunSlotsType *runslots, void *mem)
{
    bool needToAddToFreeList = false;
    // NOTE(aemelenko): There can be a performance issue here when we allocate/deallocate one object.
    const size_t runSlotSize = runslots->GetSlotsSize();
    size_t arrayIndex = RunSlotsType::ConvertToPowerOfTwoUnsafe(runSlotSize);
    bool runslotsWasFull = runslots->IsFull();
    runslots->PushFreeSlot(static_cast<FreeSlot *>(mem));
    /**
     * RunSlotsAllocator doesn't know the real size used inside a slot, so we record an upper bound:
     * the size of the slot.
     */
    AllocConfigT::OnFree(runSlotSize, typeAllocation_, memStats_);
    ASAN_POISON_MEMORY_REGION(mem, runSlotSize);
    ASSERT(!(runslotsWasFull && runslots->IsEmpty()));  // A RunSlots has more than one slot inside.
    if (runslotsWasFull) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This RunSlots was full and now we must add it to the RunSlots list";

        os::memory::LockHolder listLock(*runslots_[arrayIndex].GetLock());
#if PANDA_ENABLE_SLOW_DEBUG
        ASSERT(!runslots_[arrayIndex].IsInThisList(runslots));
#endif
        runslots_[arrayIndex].PushToTail(runslots);
    } else if (runslots->IsEmpty()) {
        os::memory::LockHolder listLock(*runslots_[arrayIndex].GetLock());
        // Check that this runslots has not already been popped from the list
        // by an Alloc call that is now waiting for the lock
        if ((runslots->GetNextRunSlots() != nullptr) || (runslots->GetPrevRunSlots() != nullptr) ||
            (runslots_[arrayIndex].GetHead() == runslots)) {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "This RunSlots is empty. Pop it from runslots list and push it to free_runslots_";
            runslots_[arrayIndex].PopFromList(runslots);
            needToAddToFreeList = true;
        }
    }

    return needToAddToFreeList;
}

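/**
 * Free a slot previously returned by Alloc. The owning RunSlots header is found by
 * aligning the pointer down by RUNSLOTS_ALIGNMENT bits. LOCK_RUN_SLOTS selects
 * whether the RunSlots lock is taken; Collect passes false because its iteration
 * already holds the lock.
 */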
template <typename AllocConfigT, typename LockConfigT>
template <bool LOCK_RUN_SLOTS>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafe(void *mem)
{
    if (UNLIKELY(mem == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free memory at invalid addr 0";
        return;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free object at address " << std::hex << mem;
#ifndef NDEBUG
    if (!AllocatedByRunSlotsAllocatorUnsafe(mem)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This object was not allocated by this allocator";
        return;
    }
#endif  // !NDEBUG

    // Now we are 100% sure that this object was allocated by the RunSlots allocator.
    // We can just align this address down and get a pointer to the RunSlots header
    uintptr_t runslotsAddr = (ToUintPtr(mem) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslotsAddr));
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots with addr " << std::hex << static_cast<void *>(runslots);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (LOCK_RUN_SLOTS) {
        runslots->GetLock()->Lock();
    }

    bool needToAddToFreeList = FreeUnsafeInternal(runslots, mem);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (LOCK_RUN_SLOTS) {
        runslots->GetLock()->Unlock();
    }

    if (needToAddToFreeList) {
        os::memory::LockHolder listLock(*freeRunslots_.GetLock());
        freeRunslots_.PushToTail(runslots);
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Freed object at address " << std::hex << mem;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Collect(const GCObjectVisitor &deathCheckerFn)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator started";
    IterateOverObjects([this, &deathCheckerFn](ObjectHeader *objectHeader) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "  iterate over " << std::hex << objectHeader;
        if (deathCheckerFn(objectHeader) == ObjectStatus::DEAD_OBJECT) {
            LOG(DEBUG, GC) << "DELETE OBJECT " << GetDebugInfoAboutObject(objectHeader);
            FreeUnsafe<false>(objectHeader);
        }
    });
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjects(const ObjectVisitor &objectVisitor)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects started";
    memoryPool_.IterateOverObjects(objectVisitor);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocator(void *object)
{
    return AllocatedByRunSlotsAllocatorUnsafe(object);
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocatorUnsafe(void *object)
{
    // NOTE(aemelenko): Add more complex and optimized solution for this method
    return memoryPool_.IsInMemPools(object);
}

template <typename AllocConfigT, typename LockConfigT>
template <bool NEED_LOCK>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::CreateNewRunSlotsFromMemory(size_t slotsSize)
{
    RunSlotsType *runslots = memoryPool_.template GetNewRunSlots<NEED_LOCK>(slotsSize);
    if (runslots != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Take " << RUNSLOTS_SIZE << " bytes of memory for new RunSlots instance from "
                                      << std::hex << runslots;
        return runslots;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "There is no free memory for RunSlots";
    return runslots;
}

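// Accept a new memory pool for this allocator. Pools bigger than MIN_POOL_SIZE are
// rejected because the pool size is fixed for now (see the NOTE below); accepted
// memory is handed over to memoryPool_, which links it into its free-pool list.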
template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AddMemoryPool(void *mem, size_t size)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Get new memory pool with size " << size << " bytes, at addr " << std::hex << mem;
    // Try to add this memory to the memory_pool_
    if (mem == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to add memory, the memory is nullptr";
        return false;
    }
    if (size > MIN_POOL_SIZE) {
        // NOTE(aemelenko): The size of the pool is fixed by now,
        // because it is required for correct freed_runslots_bitmap_
        // workflow. Fix it in #4018
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "Can't add new memory pool to this allocator because the memory size is greater than " << MIN_POOL_SIZE;
        return false;
    }
    if (!memoryPool_.AddNewMemoryPool(mem, size)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "Can't add new memory pool to this allocator. Maybe we have already added too many memory pools.";
        return false;
    }
    return true;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveAllPools(const MemVisitor &memVisitor)
{
    // We call this method to return pools to the system.
    // Therefore, delete all objects to clear all external dependencies
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Clear all objects inside the allocator";
    memoryPool_.VisitAllPools(memVisitor);
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
{
    ReleaseEmptyRunSlotsPagesUnsafe();
    // We don't need to remove RunSlots from the RunSlotsList here:
    // all free ones must already be inside the free_runslots_ list.
    memoryPool_.VisitAndRemoveFreePools(memVisitor);
}

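/**
 * Visit every occupied slot in the range [leftBorder, rightBorder]. The range must
 * stay within one crossing-map granule (enforced by the asserts below); each RunSlots
 * header in the range is locked while its occupied slots are visited.
 */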
template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjectsInRange(const MemVisitor &memVisitor,
                                                                             void *leftBorder, void *rightBorder)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange for range [" << std::hex << leftBorder << ", "
                                  << rightBorder << "]";
    ASSERT(ToUintPtr(rightBorder) >= ToUintPtr(leftBorder));
    if (!AllocatedByRunSlotsAllocatorUnsafe(leftBorder)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This memory range is not covered by this allocator";
        return;
    }
    // NOTE(aemelenko): These are temporary asserts because we can't do anything
    // if the range crosses different allocators' memory pools
    ASSERT(ToUintPtr(rightBorder) - ToUintPtr(leftBorder) == (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(rightBorder) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(leftBorder) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));
    // Now we are 100% sure that leftBorder points into memory allocated by the RunSlots allocator.
    // We can just align this address down and get a pointer to the RunSlots header
    uintptr_t runslotsAddr = (ToUintPtr(leftBorder) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    while (runslotsAddr < ToUintPtr(rightBorder)) {
        auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslotsAddr));
        os::memory::LockHolder runslotsLock(*runslots->GetLock());
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange, It is RunSlots with addr " << std::hex
                                      << static_cast<void *>(runslots);
        runslots->IterateOverOccupiedSlots(memVisitor);
        runslotsAddr += RUNSLOTS_SIZE;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange finished";
}

template <typename AllocConfigT, typename LockConfigT>
size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::VerifyAllocator()
{
    size_t failCnt = 0;
    for (size_t i = 0; i < SLOTS_SIZES_VARIANTS; i++) {
        RunSlotsType *runslots = nullptr;
        {
            os::memory::LockHolder listLock(*runslots_[i].GetLock());
            runslots = runslots_[i].GetHead();
        }
        if (runslots != nullptr) {
            os::memory::LockHolder runslotsLock(*runslots->GetLock());
            failCnt += runslots->VerifyRun();
        }
    }
    return failCnt;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::ContainObject(const ObjectHeader *obj)
{
    return AllocatedByRunSlotsAllocatorUnsafe(const_cast<ObjectHeader *>(obj));
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj));
    uintptr_t runslotsAddr = ToUintPtr(obj) >> RUNSLOTS_ALIGNMENT << RUNSLOTS_ALIGNMENT;
    auto run = static_cast<RunSlotsType *>(ToVoidPtr(runslotsAddr));
    if (run->IsEmpty()) {
        return false;
    }
    return run->IsLive(obj);
}

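// Return unused pages to the OS: first the pages of every RunSlots sitting in the
// free list, then the unoccupied tail of every memory pool.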
template <typename AllocConfigT, typename LockConfigT>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::TrimUnsafe()
{
    // Release the pages of the RunSlots in the free list
    auto head = freeRunslots_.GetHead();
    while (head != nullptr) {
        auto next = head->GetNextRunSlots();
        os::mem::ReleasePages(ToUintPtr(head), ToUintPtr(head) + RUNSLOTS_SIZE);
        head = next;
    }

    memoryPool_.VisitAllPoolsWithOccupiedSize([](void *mem, size_t usedSize, size_t size) {
        uintptr_t start = AlignUp(ToUintPtr(mem) + usedSize, ark::os::mem::GetPageSize());
        uintptr_t end = ToUintPtr(mem) + size;
        if (end >= start + ark::os::mem::GetPageSize()) {
            os::mem::ReleasePages(start, end);
        }
    });
}

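// RunSlotsList is an intrusive doubly-linked list of RunSlots headers:
// the next/prev links are stored in the RunSlots themselves.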
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PushToTail(RunSlotsType *runslots)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Push to tail RunSlots at addr " << std::hex << static_cast<void *>(runslots);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "     tail_ " << std::hex << tail_;
    if (tail_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "     List was empty, setup head_ and tail_";
        // This means that head_ == nullptr too
        head_ = runslots;
        tail_ = runslots;
        return;
    }
    tail_->SetNextRunSlots(runslots);
    runslots->SetPrevRunSlots(tail_);
    tail_ = runslots;
    tail_->SetNextRunSlots(nullptr);
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromHead()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromHead";
    if (UNLIKELY(head_ == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "      List is empty, nothing to pop";
        return nullptr;
    }
    RunSlotsType *headRunslots = head_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "     popped from head RunSlots " << std::hex << headRunslots;
    head_ = headRunslots->GetNextRunSlots();
    if (head_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "     Now list is empty";
        // We popped the last element of the list
        tail_ = nullptr;
    } else {
        head_->SetPrevRunSlots(nullptr);
    }
    headRunslots->SetNextRunSlots(nullptr);
    return headRunslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromTail()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromTail";
    if (UNLIKELY(tail_ == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "      List is empty, nothing to pop";
        return nullptr;
    }
    RunSlotsType *tailRunslots = tail_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "     popped from tail RunSlots " << std::hex << tailRunslots;
    tail_ = tailRunslots->GetPrevRunSlots();
    if (tail_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "     Now list is empty";
        // We popped the last element of the list
        head_ = nullptr;
    } else {
        tail_->SetNextRunSlots(nullptr);
    }
    tailRunslots->SetPrevRunSlots(nullptr);
    return tailRunslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromList(RunSlotsType *runslots)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromList RunSlots with addr " << std::hex << runslots;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "head_ = " << std::hex << head_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "tail_ = " << std::hex << tail_;

    if (runslots == head_) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is the RunSlots at the head.";
        PopFromHead();
        return;
    }
    if (runslots == tail_) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is the RunSlots at the tail.";
        PopFromTail();
        return;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Remove RunSlots from the list.";
    ASSERT(runslots != nullptr);
    RunSlotsType *nextRunslots = runslots->GetNextRunSlots();
    RunSlotsType *previousRunslots = runslots->GetPrevRunSlots();
    ASSERT(nextRunslots != nullptr);
    ASSERT(previousRunslots != nullptr);

    nextRunslots->SetPrevRunSlots(previousRunslots);
    previousRunslots->SetNextRunSlots(nextRunslots);
    runslots->SetNextRunSlots(nullptr);
    runslots->SetPrevRunSlots(nullptr);
}

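// MemPoolManager tracks two lists of pools: occupied pools (via occupiedTail_, with
// partiallyOccupiedHead_ pointing to the first pool that still has room for a new
// RunSlots) and completely free pools (via freeTail_).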
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::MemPoolManager()
{
    occupiedTail_ = nullptr;
    freeTail_ = nullptr;
    partiallyOccupiedHead_ = nullptr;
}

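// Carve memory for a new RunSlots out of a pool: prefer the first partially occupied
// pool; otherwise move a pool from the free list to the occupied list and use it.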
template <typename AllocConfigT, typename LockConfigT>
template <bool NEED_LOCK>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::GetNewRunSlots(size_t slotsSize)
{
    os::memory::WriteLockHolder<typename LockConfigT::PoolLock, NEED_LOCK> wlock(lock_);
    RunSlotsType *newRunslots = nullptr;
    if (partiallyOccupiedHead_ != nullptr) {
        newRunslots = partiallyOccupiedHead_->GetMemoryForRunSlots(slotsSize);
        ASSERT(newRunslots != nullptr);
        if (UNLIKELY(!partiallyOccupiedHead_->HasMemoryForRunSlots())) {
            partiallyOccupiedHead_ = partiallyOccupiedHead_->GetNext();
            ASSERT((partiallyOccupiedHead_ == nullptr) || (partiallyOccupiedHead_->HasMemoryForRunSlots()));
        }
    } else if (freeTail_ != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "MemPoolManager: occupied_tail_ doesn't have memory for RunSlots, get new pool from free pools";
        PoolListElement *freeElement = freeTail_;
        freeTail_ = freeTail_->GetPrev();

        freeElement->PopFromList();
        freeElement->SetPrev(occupiedTail_);

        if (occupiedTail_ != nullptr) {
            ASSERT(occupiedTail_->GetNext() == nullptr);
            occupiedTail_->SetNext(freeElement);
        }
        occupiedTail_ = freeElement;

        if (partiallyOccupiedHead_ == nullptr) {
            partiallyOccupiedHead_ = occupiedTail_;
            ASSERT(partiallyOccupiedHead_->HasMemoryForRunSlots());
        }

        ASSERT(occupiedTail_->GetNext() == nullptr);
        newRunslots = occupiedTail_->GetMemoryForRunSlots(slotsSize);
        ASSERT(newRunslots != nullptr);
    }
    return newRunslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::AddNewMemoryPool(void *mem, size_t size)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *newPool = PoolListElement::Create(mem, size, freeTail_);
    if (freeTail_ != nullptr) {
        ASSERT(freeTail_->GetNext() == nullptr);
        freeTail_->SetNext(newPool);
    }
    freeTail_ = newPool;
    ASAN_POISON_MEMORY_REGION(mem, size);
    // Unpoison the header now so we don't have to unpoison it on every access.
    ASAN_UNPOISON_MEMORY_REGION(mem, sizeof(PoolListElement));
    return true;
}

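// Return a RunSlots to its pool: record it in the pool's freed-runslots bitmap,
// relink the pool lists so partiallyOccupiedHead_ stays correct, and release the
// page-aligned part of the RunSlots memory back to the OS.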
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::ReturnAndReleaseRunSlotsMemory(
    RunSlotsType *runslots)
{
    os::memory::WriteLockHolder wlock(lock_);
    auto pool = static_cast<PoolListElement *>(ToVoidPtr(runslots->GetPoolPointer()));
    if (!pool->HasMemoryForRunSlots()) {
        ASSERT(partiallyOccupiedHead_ != pool);
        // We should move this pool to the end of the occupied list
        if (pool != occupiedTail_) {
            pool->PopFromList();
            pool->SetPrev(occupiedTail_);
            if (UNLIKELY(occupiedTail_ == nullptr)) {
                UNREACHABLE();
            }
            occupiedTail_->SetNext(pool);
            occupiedTail_ = pool;
        } else {
            ASSERT(partiallyOccupiedHead_ == nullptr);
        }
        if (partiallyOccupiedHead_ == nullptr) {
            partiallyOccupiedHead_ = occupiedTail_;
        }
    }

    pool->AddFreedRunSlots(runslots);
    ASSERT(partiallyOccupiedHead_->HasMemoryForRunSlots());

    // Start address from which we can release pages
    uintptr_t startAddr = AlignUp(ToUintPtr(runslots), os::mem::GetPageSize());
    // End address before which we can release pages
    uintptr_t endAddr = os::mem::AlignDownToPageSize(ToUintPtr(runslots) + RUNSLOTS_SIZE);
    if (startAddr < endAddr) {
        os::mem::ReleasePages(startAddr, endAddr);
    }
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IsInMemPools(void *object)
{
    os::memory::ReadLockHolder rlock(lock_);
    PoolListElement *current = occupiedTail_;
    while (current != nullptr) {
        if (current->IsInUsedMemory(object)) {
            return true;
        }
        current = current->GetPrev();
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IterateOverObjects(
    const ObjectVisitor &objectVisitor)
{
    PoolListElement *currentPool = nullptr;
    {
        os::memory::ReadLockHolder rlock(lock_);
        currentPool = occupiedTail_;
    }
    while (currentPool != nullptr) {
        currentPool->IterateOverRunSlots([&currentPool, &objectVisitor](RunSlotsType *runslots) {
            os::memory::LockHolder runslotsLock(*runslots->GetLock());
            UNUSED_VAR(currentPool);  // For release build
            ASSERT(runslots->GetPoolPointer() == ToUintPtr(currentPool));
            runslots->IterateOverOccupiedSlots(objectVisitor);
            return true;
        });
        {
            os::memory::ReadLockHolder rlock(lock_);
            currentPool = currentPool->GetPrev();
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPools(const MemVisitor &memVisitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *currentPool = occupiedTail_;
    while (currentPool != nullptr) {
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = currentPool->GetPrev();
        memVisitor(currentPool->GetPoolMemory(), currentPool->GetSize());
        currentPool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPoolsWithOccupiedSize(
    const MemVisitor &memVisitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *currentPool = occupiedTail_;
    while (currentPool != nullptr) {
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = currentPool->GetPrev();
        memVisitor(currentPool->GetPoolMemory(), currentPool->GetOccupiedSize(), currentPool->GetSize());
        currentPool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools inside RunSlotsAllocator";
    // First, iterate over totally free pools:
    PoolListElement *currentPool = freeTail_;
    while (currentPool != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Visit free pool with addr " << std::hex
                                      << currentPool->GetPoolMemory() << " and size " << std::dec
                                      << currentPool->GetSize();
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = currentPool->GetPrev();
        memVisitor(currentPool->GetPoolMemory(), currentPool->GetSize());
        currentPool = tmp;
    }
    freeTail_ = nullptr;
    // Second, try to find free pools among the occupied ones:
    currentPool = occupiedTail_;
    while (currentPool != nullptr) {
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = currentPool->GetPrev();
        if (!currentPool->HasUsedMemory()) {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "VisitAllFreePools: Visit occupied pool with addr " << std::hex << currentPool->GetPoolMemory()
                << " and size " << std::dec << currentPool->GetSize();
            // This pool doesn't have any occupied memory in RunSlots.
            // Therefore, we can free it
            if (occupiedTail_ == currentPool) {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Update occupied_tail_";
                occupiedTail_ = currentPool->GetPrev();
            }
            if (currentPool == partiallyOccupiedHead_) {
                partiallyOccupiedHead_ = partiallyOccupiedHead_->GetNext();
                ASSERT((partiallyOccupiedHead_ == nullptr) || (partiallyOccupiedHead_->HasMemoryForRunSlots()));
            }
            currentPool->PopFromList();
            memVisitor(currentPool->GetPoolMemory(), currentPool->GetSize());
        }
        currentPool = tmp;
    }
}

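// PoolListElement is the header of one memory pool: it owns the pool range, a bump
// pointer (freePtr_) used to carve out new RunSlots, and a bitmap of freed RunSlots
// that can be reused before the bump pointer grows further.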
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PoolListElement()
{
    startMem_ = 0;
    poolMem_ = 0;
    size_ = 0;
    freePtr_ = 0;
    prevPool_ = nullptr;
    nextPool_ = nullptr;
    freededRunslotsCount_ = 0;
    memset_s(storageForBitmap_.data(), sizeof(BitMapStorageType), 0, sizeof(BitMapStorageType));
}

template <typename AllocConfigT, typename LockConfigT>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PopFromList()
{
    if (nextPool_ != nullptr) {
        nextPool_->SetPrev(prevPool_);
    }
    if (prevPool_ != nullptr) {
        prevPool_->SetNext(nextPool_);
    }
    nextPool_ = nullptr;
    prevPool_ = nullptr;
}

template <typename AllocConfigT, typename LockConfigT>
uintptr_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFirstRunSlotsBlock(
    uintptr_t mem)
{
    return AlignUp(mem, 1UL << RUNSLOTS_ALIGNMENT);
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::Initialize(
    void *poolMem, uintptr_t unoccupiedMem, size_t size, PoolListElement *prev)
{
    startMem_ = unoccupiedMem;
    poolMem_ = ToUintPtr(poolMem);
    size_ = size;
    // Atomic with release order reason: data race with free_ptr_ with dependencies on writes before the store which
    // should become visible acquire
    freePtr_.store(GetFirstRunSlotsBlock(startMem_), std::memory_order_release);
    prevPool_ = prev;
    nextPool_ = nullptr;
    freededRunslotsCount_ = 0;
    freedRunslotsBitmap_.ReInitializeMemoryRange(poolMem);
    ASSERT(freedRunslotsBitmap_.FindFirstMarkedChunks() == nullptr);
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: first free RunSlots block = " << std::hex
                                  << freePtr_.load(std::memory_order_acquire);
}

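// Hand out memory for one RunSlots from this pool: reuse a freed RunSlots recorded
// in the bitmap if one exists, otherwise bump freePtr_ by RUNSLOTS_SIZE.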
template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetMemoryForRunSlots(size_t slotsSize)
{
    if (!HasMemoryForRunSlots()) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: There is no free memory for RunSlots";
        return nullptr;
    }
    RunSlotsType *runslots = GetFreedRunSlots(slotsSize);
    if (runslots == nullptr) {
        // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
        // should become visible
        uintptr_t oldMem = freePtr_.load(std::memory_order_acquire);
        ASSERT(poolMem_ + size_ >= oldMem + RUNSLOTS_SIZE);

        // Initialize it first, before updating the free pointer,
        // because it will be visible outside after that.
        runslots = static_cast<RunSlotsType *>(ToVoidPtr(oldMem));
        runslots->Initialize(slotsSize, ToUintPtr(this), true);
        // Atomic with acq_rel order reason: data race with free_ptr_ with dependencies on reads after the load and on
        // writes before the store
        freePtr_.fetch_add(RUNSLOTS_SIZE, std::memory_order_acq_rel);
        // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
        // should become visible
        ASSERT(freePtr_.load(std::memory_order_acquire) == (oldMem + RUNSLOTS_SIZE));
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: Took memory for RunSlots from addr " << std::hex
                                      << ToVoidPtr(oldMem)
                                      // Atomic with acquire order reason: data race with free_ptr_
                                      << ". New first free RunSlots block = "
                                      << ToVoidPtr(freePtr_.load(std::memory_order_acquire));
    }
    ASSERT(runslots != nullptr);
    return runslots;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename RunSlotsVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IterateOverRunSlots(
    const RunSlotsVisitor &runslotsVisitor)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating over runslots inside pool with address " << std::hex << poolMem_
                                  << " with size " << std::dec << size_ << " bytes";
    uintptr_t currentRunslot = GetFirstRunSlotsBlock(startMem_);
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    uintptr_t lastRunslot = freePtr_.load(std::memory_order_acquire);
    while (currentRunslot < lastRunslot) {
        ASSERT(startMem_ <= currentRunslot);
        if (!freedRunslotsBitmap_.AtomicTest(ToVoidPtr(currentRunslot))) {
            auto curRs = static_cast<RunSlotsType *>(ToVoidPtr(currentRunslot));
            LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating. Process RunSlots " << std::hex << curRs;
            if (!runslotsVisitor(curRs)) {
                return;
            }
        }
        currentRunslot += RUNSLOTS_SIZE;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating runslots inside this pool finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasUsedMemory()
{
    uintptr_t currentRunslot = GetFirstRunSlotsBlock(startMem_);
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    uintptr_t lastRunslot = freePtr_.load(std::memory_order_acquire);
    while (currentRunslot < lastRunslot) {
        ASSERT(startMem_ <= currentRunslot);
        if (!freedRunslotsBitmap_.AtomicTest(ToVoidPtr(currentRunslot))) {
            // We have a runslots instance which is in use somewhere.
            return true;
        }
        currentRunslot += RUNSLOTS_SIZE;
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetOccupiedSize()
{
    if (!IsInitialized()) {
        return 0;
    }
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    return freePtr_.load(std::memory_order_acquire) - poolMem_;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IsInUsedMemory(void *object)
{
    uintptr_t memPointer = startMem_;
    ASSERT(!((ToUintPtr(object) < GetFirstRunSlotsBlock(memPointer)) && (ToUintPtr(object) >= memPointer)));
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    bool isInAllocatedMemory = (ToUintPtr(object) < freePtr_.load(std::memory_order_acquire)) &&
                               (ToUintPtr(object) >= GetFirstRunSlotsBlock(memPointer));
    return isInAllocatedMemory && !IsInFreedRunSlots(object);
}

template <typename AllocConfigT, typename LockConfigT>
typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFreedRunSlots(size_t slotsSize)
{
    auto slots = static_cast<RunSlotsType *>(freedRunslotsBitmap_.FindFirstMarkedChunks());
    if (slots == nullptr) {
        ASSERT(freededRunslotsCount_ == 0);
        return nullptr;
    }

    // Initialize it first, before updating the bitmap,
    // because it will be visible outside after that.
    slots->Initialize(slotsSize, ToUintPtr(this), true);

    ASSERT(freededRunslotsCount_ > 0);
    [[maybe_unused]] bool oldVal = freedRunslotsBitmap_.AtomicTestAndClear(slots);
    ASSERT(oldVal);
    freededRunslotsCount_--;

    return slots;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasMemoryForRunSlots()
{
    if (!IsInitialized()) {
        return false;
    }
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    bool hasFreeMemory = (freePtr_.load(std::memory_order_acquire) + RUNSLOTS_SIZE) <= (poolMem_ + size_);
    bool hasFreedRunslots = (freededRunslotsCount_ > 0);
    ASSERT(hasFreedRunslots == (freedRunslotsBitmap_.FindFirstMarkedChunks() != nullptr));
    return hasFreeMemory || hasFreedRunslots;
}

#undef LOG_RUNSLOTS_ALLOCATOR

}  // namespace ark::mem
#endif  // PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H