1 /**
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H
16 #define PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H
17
18 #include <securec.h>
19 #include "libpandabase/utils/asan_interface.h"
20 #include "runtime/mem/alloc_config.h"
21 #include "runtime/mem/object_helpers.h"
22 #include "runtime/mem/runslots_allocator.h"
23
24 namespace ark::mem {
25
26 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
27 #define LOG_RUNSLOTS_ALLOCATOR(level) LOG(level, ALLOC) << "RunSlotsAllocator: "
28
29 template <typename AllocConfigT, typename LockConfigT>
30 inline RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsAllocator(MemStatsType *memStats, SpaceType typeAllocation)
31 : typeAllocation_(typeAllocation), memStats_(memStats)
32 {
33 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator";
34 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator finished";
35 }
36
37 template <typename AllocConfigT, typename LockConfigT>
38 inline RunSlotsAllocator<AllocConfigT, LockConfigT>::~RunSlotsAllocator()
39 {
40 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator";
41 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator finished";
42 }
43
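// Allocates `size` bytes rounded up to the nearest power-of-two slot size. The path is: take a RunSlots
// page from the per-size list; if none is available, reuse one from free_runslots_ or carve a new one from
// pool memory (unless DISABLE_USE_FREE_RUNSLOTS is set, in which case allocation fails), then pop a free slot.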
44 template <typename AllocConfigT, typename LockConfigT>
45 template <bool NEED_LOCK, bool DISABLE_USE_FREE_RUNSLOTS>
46 // CC-OFFNXT(G.FUN.01-CPP) Allocations perf critical, the change will create additional conditions and method calls that
47 // will degrade performance
48 // CC-OFFNXT(G.FUD.06) Allocations perf critical, the change will create additional conditions and method calls that
49 // will degrade performance
50 inline void *RunSlotsAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
51 {
52 using ListLock = typename LockConfigT::ListLock;
53 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to allocate " << size << " bytes of memory with align " << align;
54 if (size == 0) {
55 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is zero";
56 return nullptr;
57 }
58 // NOTE(aemelenko): Do something more memory-flexible with alignment
59 size_t alignmentSize = GetAlignmentInBytes(align);
60 if (alignmentSize > size) {
61 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Change size of allocation to " << alignmentSize
62 << " bytes because of alignment";
63 size = alignmentSize;
64 }
65 if (size > RunSlotsType::MaxSlotSize()) {
66 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is too big";
67 return nullptr;
68 }
69 size_t slotSizePowerOfTwo = RunSlotsType::ConvertToPowerOfTwoUnsafe(size);
70 size_t arrayIndex = slotSizePowerOfTwo;
71 const size_t runSlotSize = 1UL << slotSizePowerOfTwo;
72 RunSlotsType *runslots = nullptr;
73 bool usedFromFreedRunslotsList = false;
74 {
75 os::memory::LockHolder<ListLock, NEED_LOCK> listLock(*runslots_[arrayIndex].GetLock());
76 runslots = runslots_[arrayIndex].PopFromHead();
77 }
78 if (runslots == nullptr) {
79 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "We don't have free RunSlots for size " << runSlotSize
80 << ". Try to get new one.";
81 if (DISABLE_USE_FREE_RUNSLOTS) {
82 return nullptr;
83 }
84 {
85 os::memory::LockHolder<ListLock, NEED_LOCK> listLock(*freeRunslots_.GetLock());
86 runslots = freeRunslots_.PopFromHead();
87 }
88 if (runslots != nullptr) {
89 usedFromFreedRunslotsList = true;
90 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Get RunSlots from free list";
91 } else {
92 LOG_RUNSLOTS_ALLOCATOR(DEBUG)
93 << "Failed to get new RunSlots from free list, try to allocate one from memory";
94 runslots = CreateNewRunSlotsFromMemory(runSlotSize);
95 if (runslots == nullptr) {
96 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate an object, couldn't create RunSlots";
97 return nullptr;
98 }
99 }
100 }
101 void *allocatedMem = nullptr;
102 {
103 os::memory::LockHolder<typename LockConfigT::RunSlotsLock, NEED_LOCK> runslotsLock(*runslots->GetLock());
104 if (usedFromFreedRunslotsList) {
105 // NOTE(aemelenko): if we allocate and free two objects of different sizes,
106 // we will have a perf issue here. Maybe it is better to delete free_runslots_?
107 if (runslots->GetSlotsSize() != runSlotSize) {
108 runslots->Initialize(runSlotSize, runslots->GetPoolPointer(), false);
109 }
110 }
111 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Used runslots with addr " << std::hex << runslots;
112 allocatedMem = static_cast<void *>(runslots->PopFreeSlot());
113 if (allocatedMem == nullptr) {
114 UNREACHABLE();
115 }
116 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Allocate a memory at address " << std::hex << allocatedMem;
117 if (!runslots->IsFull()) {
118 os::memory::LockHolder<ListLock, NEED_LOCK> listLock(*runslots_[arrayIndex].GetLock());
119 // We didn't take the last free slot from this RunSlots
120 runslots_[arrayIndex].PushToTail(runslots);
121 }
122 ASAN_UNPOISON_MEMORY_REGION(allocatedMem, size);
123 AllocConfigT::OnAlloc(runSlotSize, typeAllocation_, memStats_);
124 AllocConfigT::MemoryInit(allocatedMem);
125 #ifdef PANDA_MEASURE_FRAGMENTATION
126 // Atomic with relaxed order reason: order is not required
127 allocatedBytes_.fetch_add(runSlotSize, std::memory_order_relaxed);
128 #endif
129 }
130 return allocatedMem;
131 }
132
133 template <typename AllocConfigT, typename LockConfigT>
134 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Free(void *mem)
135 {
136 FreeUnsafe<true>(mem);
137 }
138
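// Drains the free_runslots_ list and returns the memory of each empty RunSlots page to the pool manager,
// which also releases the corresponding pages back to the OS.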
139 template <typename AllocConfigT, typename LockConfigT>
140 // CC-OFFNXT(G.FUD.06) perf critical
141 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::ReleaseEmptyRunSlotsPagesUnsafe()
142 {
143 // Iterate over free_runslots list:
144 RunSlotsType *curFreeRunslots = nullptr;
145 {
146 os::memory::LockHolder listLock(*freeRunslots_.GetLock());
147 curFreeRunslots = freeRunslots_.PopFromHead();
148 }
149 while (curFreeRunslots != nullptr) {
150 memoryPool_.ReturnAndReleaseRunSlotsMemory(curFreeRunslots);
151
152 {
153 os::memory::LockHolder listLock(*freeRunslots_.GetLock());
154 curFreeRunslots = freeRunslots_.PopFromHead();
155 }
156 }
157 }
158
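// Returns the slot at `mem` to its RunSlots page and updates allocator statistics. If the page was full,
// it is linked back into the per-size list; if it became completely empty, it is unlinked from that list
// and the return value tells the caller to move it to free_runslots_.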
159 template <typename AllocConfigT, typename LockConfigT>
160 // CC-OFFNXT(G.FUD.06) perf critical
161 inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafeInternal(RunSlotsType *runslots, void *mem)
162 {
163 bool needToAddToFreeList = false;
164 // NOTE(aemelenko): There can be a performance issue here when we allocate/deallocate one object.
165 const size_t runSlotSize = runslots->GetSlotsSize();
166 size_t arrayIndex = RunSlotsType::ConvertToPowerOfTwoUnsafe(runSlotSize);
167 bool runslotsWasFull = runslots->IsFull();
168 runslots->PushFreeSlot(static_cast<FreeSlot *>(mem));
169 /**
170 * RunSlotsAllocator doesn't know the real size used inside the slot, so we record an upper bound - the size of
171 * the slot.
172 */
173 AllocConfigT::OnFree(runSlotSize, typeAllocation_, memStats_);
174 #ifdef PANDA_MEASURE_FRAGMENTATION
175 // Atomic with relaxed order reason: order is not required
176 allocatedBytes_.fetch_sub(runSlotSize, std::memory_order_relaxed);
177 #endif
178 ASAN_POISON_MEMORY_REGION(mem, runSlotSize);
179 ASSERT(!(runslotsWasFull && runslots->IsEmpty())); // Runslots has more than one slot inside.
180 if (runslotsWasFull) {
181 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This RunSlots was full and now we must add it to the RunSlots list";
182
183 os::memory::LockHolder listLock(*runslots_[arrayIndex].GetLock());
184 #if PANDA_ENABLE_SLOW_DEBUG
185 ASSERT(!runslots_[arrayIndex].IsInThisList(runslots));
186 #endif
187 runslots_[arrayIndex].PushToTail(runslots);
188 } else if (runslots->IsEmpty()) {
189 os::memory::LockHolder listLock(*runslots_[arrayIndex].GetLock());
190 // Check whether this runslots may have been taken from the list by Alloc,
191 // which is now waiting for the lock
192 if ((runslots->GetNextRunSlots() != nullptr) || (runslots->GetPrevRunSlots() != nullptr) ||
193 (runslots_[arrayIndex].GetHead() == runslots)) {
194 LOG_RUNSLOTS_ALLOCATOR(DEBUG)
195 << "This RunSlots is empty. Pop it from runslots list and push it to free_runslots_";
196 runslots_[arrayIndex].PopFromList(runslots);
197 needToAddToFreeList = true;
198 }
199 }
200
201 return needToAddToFreeList;
202 }
203
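// Common free path: the owning RunSlots header is found by aligning the address down to the RunSlots block
// boundary, FreeUnsafeInternal does the bookkeeping (optionally under the RunSlots lock, depending on
// LOCK_RUN_SLOTS), and the page is moved to free_runslots_ if it became empty.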
204 template <typename AllocConfigT, typename LockConfigT>
205 template <bool LOCK_RUN_SLOTS>
206 // CC-OFFNXT(G.FUD.06) perf critical
207 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafe(void *mem)
208 {
209 if (UNLIKELY(mem == nullptr)) {
210 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free memory at invalid addr 0";
211 return;
212 }
213 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free object at address " << std::hex << mem;
214 #ifndef NDEBUG
215 if (!AllocatedByRunSlotsAllocatorUnsafe(mem)) {
216 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This object was not allocated by this allocator";
217 return;
218 }
219 #endif // !NDEBUG
220
221 // Now we are 100% sure that this object was allocated by the RunSlots allocator.
222 // We can just align this address down and get a pointer to the RunSlots header
223 uintptr_t runslotsAddr = (ToUintPtr(mem) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
224 auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslotsAddr));
225 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots with addr " << std::hex << static_cast<void *>(runslots);
226
227 // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
228 if constexpr (LOCK_RUN_SLOTS) {
229 runslots->GetLock()->Lock();
230 }
231
232 bool needToAddToFreeList = FreeUnsafeInternal(runslots, mem);
233
234 // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
235 if constexpr (LOCK_RUN_SLOTS) {
236 runslots->GetLock()->Unlock();
237 }
238
239 if (needToAddToFreeList) {
240 os::memory::LockHolder listLock(*freeRunslots_.GetLock());
241 freeRunslots_.PushToTail(runslots);
242 }
243 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Freed object at address " << std::hex << mem;
244 }
245
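// Walks all allocated objects and frees those reported as dead by the provided death checker.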
246 template <typename AllocConfigT, typename LockConfigT>
247 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Collect(const GCObjectVisitor &deathCheckerFn)
248 {
249 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator started";
250 IterateOverObjects([this, &deathCheckerFn](ObjectHeader *objectHeader) {
251 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " iterate over " << std::hex << objectHeader;
252 if (deathCheckerFn(objectHeader) == ObjectStatus::DEAD_OBJECT) {
253 LOG(DEBUG, GC) << "DELETE OBJECT " << GetDebugInfoAboutObject(objectHeader);
254 FreeUnsafe<false>(objectHeader);
255 }
256 });
257 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator finished";
258 }
259
260 template <typename AllocConfigT, typename LockConfigT>
261 template <typename ObjectVisitor>
262 void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjects(const ObjectVisitor &objectVisitor)
263 {
264 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects started";
265 memoryPool_.IterateOverObjects(objectVisitor);
266 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects finished";
267 }
268
269 template <typename AllocConfigT, typename LockConfigT>
270 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocator(void *object)
271 {
272 return AllocatedByRunSlotsAllocatorUnsafe(object);
273 }
274
275 template <typename AllocConfigT, typename LockConfigT>
276 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocatorUnsafe(void *object)
277 {
278 // NOTE(aemelenko): Add more complex and optimized solution for this method
279 return memoryPool_.IsInMemPools(object);
280 }
281
282 template <typename AllocConfigT, typename LockConfigT>
283 template <bool NEED_LOCK>
284 // CC-OFFNXT(G.FMT.07) project code style
285 inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
286 RunSlotsAllocator<AllocConfigT, LockConfigT>::CreateNewRunSlotsFromMemory(size_t slotsSize)
287 {
288 RunSlotsType *runslots = memoryPool_.template GetNewRunSlots<NEED_LOCK>(slotsSize);
289 if (runslots != nullptr) {
290 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Take " << RUNSLOTS_SIZE << " bytes of memory for new RunSlots instance from "
291 << std::hex << runslots;
292 return runslots;
293 }
294 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "There is no free memory for RunSlots";
295 return runslots;
296 }
297
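// Registers a new memory pool in the allocator. The pool size must not exceed MIN_POOL_SIZE (see the NOTE
// below), and the memory itself is handed over to the MemPoolManager.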
298 template <typename AllocConfigT, typename LockConfigT>
299 // CC-OFFNXT(G.FUD.06) perf critical
300 inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AddMemoryPool(void *mem, size_t size)
301 {
302 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Get new memory pool with size " << size << " bytes, at addr " << std::hex << mem;
303 // Try to add this memory to the memory_pool_
304 if (mem == nullptr) {
305 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to add memory, the memory is nullptr";
306 return false;
307 }
308 if (size > MIN_POOL_SIZE) {
309 // NOTE(aemelenko): The size of the pool is fixed for now,
310 // because it is required for the correct freed_runslots_bitmap_
311 // workflow. Fix it in #4018
312 LOG_RUNSLOTS_ALLOCATOR(DEBUG)
313 << "Can't add new memory pool to this allocator because the memory size is greater than " << MIN_POOL_SIZE;
314 return false;
315 }
316 if (!memoryPool_.AddNewMemoryPool(mem, size)) {
317 LOG_RUNSLOTS_ALLOCATOR(DEBUG)
318 << "Can't add new memory pool to this allocator. Maybe we have already added too many memory pools.";
319 return false;
320 }
321 return true;
322 }
323
324 template <typename AllocConfigT, typename LockConfigT>
325 template <typename MemVisitor>
326 void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveAllPools(const MemVisitor &memVisitor)
327 {
328 // We call this method when we return pools to the system.
329 // Therefore, delete all objects to clear all external dependencies
330 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Clear all objects inside the allocator";
331 memoryPool_.VisitAllPools(memVisitor);
332 }
333
334 template <typename AllocConfigT, typename LockConfigT>
335 template <typename MemVisitor>
336 void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
337 {
338 ReleaseEmptyRunSlotsPagesUnsafe();
339 // We need to remove RunSlots from RunSlotsList
340 // All of them must be inside free_runslots_ list.
341 memoryPool_.VisitAndRemoveFreePools(memVisitor);
342 }
343
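// Visits all occupied slots in the RunSlots pages that overlap the [leftBorder, rightBorder] range.
// The range is expected to match exactly one crossing-map granule owned by this allocator.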
344 template <typename AllocConfigT, typename LockConfigT>
345 template <typename MemVisitor>
346 void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjectsInRange(const MemVisitor &memVisitor,
347 void *leftBorder, void *rightBorder)
348 {
349 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange for range [" << std::hex << leftBorder << ", "
350 << rightBorder << "]";
351 ASSERT(ToUintPtr(rightBorder) >= ToUintPtr(leftBorder));
352 if (!AllocatedByRunSlotsAllocatorUnsafe(leftBorder)) {
353 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This memory range is not covered by this allocator";
354 return;
355 }
356 // NOTE(aemelenko): These are temporary asserts because we can't do anything
357 // if the range crosses memory pools of different allocators
358 ASSERT(ToUintPtr(rightBorder) - ToUintPtr(leftBorder) == (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
359 ASSERT((ToUintPtr(rightBorder) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
360 (ToUintPtr(leftBorder) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));
361 // Now we are 100% sure that this leftBorder was allocated by the RunSlots allocator.
362 // We can just align this address down and get a pointer to the RunSlots header
363 uintptr_t runslotsAddr = (ToUintPtr(leftBorder) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
364 while (runslotsAddr < ToUintPtr(rightBorder)) {
365 auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslotsAddr));
366 os::memory::LockHolder runslotsLock(*runslots->GetLock());
367 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange, It is RunSlots with addr " << std::hex
368 << static_cast<void *>(runslots);
369 runslots->IterateOverOccupiedSlots(memVisitor);
370 runslotsAddr += RUNSLOTS_SIZE;
371 }
372 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange finished";
373 }
374
375 template <typename AllocConfigT, typename LockConfigT>
376 size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::VerifyAllocator()
377 {
378 size_t failCnt = 0;
379 for (size_t i = 0; i < SLOTS_SIZES_VARIANTS; i++) {
380 RunSlotsType *runslots = nullptr;
381 {
382 os::memory::LockHolder listLock(*runslots_[i].GetLock());
383 runslots = runslots_[i].GetHead();
384 }
385 if (runslots != nullptr) {
386 os::memory::LockHolder runslotsLock(*runslots->GetLock());
387 failCnt += runslots->VerifyRun();
388 }
389 }
390 return failCnt;
391 }
392
393 template <typename AllocConfigT, typename LockConfigT>
394 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::ContainObject(const ObjectHeader *obj)
395 {
396 return AllocatedByRunSlotsAllocatorUnsafe(const_cast<ObjectHeader *>(obj));
397 }
398
399 template <typename AllocConfigT, typename LockConfigT>
400 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::IsLive(const ObjectHeader *obj)
401 {
402 ASSERT(ContainObject(obj));
403 uintptr_t runslotsAddr = ToUintPtr(obj) >> RUNSLOTS_ALIGNMENT << RUNSLOTS_ALIGNMENT;
404 auto run = static_cast<RunSlotsType *>(ToVoidPtr(runslotsAddr));
405 if (run->IsEmpty()) {
406 return false;
407 }
408 return run->IsLive(obj);
409 }
410
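// Returns unused physical pages to the OS: first the pages of RunSlots in the free list, then the untouched
// tail of every memory pool past its occupied prefix.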
411 template <typename AllocConfigT, typename LockConfigT>
412 void RunSlotsAllocator<AllocConfigT, LockConfigT>::TrimUnsafe()
413 {
414 // Release pages of RunSlots in the free runslots list
415 auto head = freeRunslots_.GetHead();
416 while (head != nullptr) {
417 auto next = head->GetNextRunSlots();
418 os::mem::ReleasePages(ToUintPtr(head), ToUintPtr(head) + RUNSLOTS_SIZE);
419 head = next;
420 }
421
422 memoryPool_.VisitAllPoolsWithOccupiedSize([](void *mem, size_t usedSize, size_t size) {
423 uintptr_t start = AlignUp(ToUintPtr(mem) + usedSize, ark::os::mem::GetPageSize());
424 uintptr_t end = ToUintPtr(mem) + size;
425 if (end >= start + ark::os::mem::GetPageSize()) {
426 os::mem::ReleasePages(start, end);
427 }
428 });
429 }
430
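// RunSlotsList is an intrusive doubly linked list of RunSlots headers. head_ and tail_ are updated here
// without any internal locking, so callers are expected to hold the corresponding list lock where
// concurrent access is possible.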
431 template <typename AllocConfigT, typename LockConfigT>
432 // CC-OFFNXT(G.FUD.06) perf critical
433 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PushToTail(RunSlotsType *runslots)
434 {
435 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Push to tail RunSlots at addr " << std::hex << static_cast<void *>(runslots);
436 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " tail_ " << std::hex << tail_;
437 if (tail_ == nullptr) {
438 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List was empty, setup head_ and tail_";
439 // this means that head_ == nullptr too
440 head_ = runslots;
441 tail_ = runslots;
442 return;
443 }
444 tail_->SetNextRunSlots(runslots);
445 runslots->SetPrevRunSlots(tail_);
446 tail_ = runslots;
447 tail_->SetNextRunSlots(nullptr);
448 }
449
450 template <typename AllocConfigT, typename LockConfigT>
451 // CC-OFFNXT(G.FUD.06) perf critical
452 // CC-OFFNXT(G.FMT.07) project code style
453 inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
454 RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromHead()
455 {
456 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromHead";
457 if (UNLIKELY(head_ == nullptr)) {
458 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List is empty, nothing to pop";
459 return nullptr;
460 }
461 RunSlotsType *headRunslots = head_;
462 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " popped from head RunSlots " << std::hex << headRunslots;
463 head_ = headRunslots->GetNextRunSlots();
464 if (head_ == nullptr) {
465 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " Now list is empty";
466 // We pop the last element in the list
467 tail_ = nullptr;
468 } else {
469 head_->SetPrevRunSlots(nullptr);
470 }
471 headRunslots->SetNextRunSlots(nullptr);
472 return headRunslots;
473 }
474
475 template <typename AllocConfigT, typename LockConfigT>
476 // CC-OFFNXT(G.FUD.06) perf critical
477 // CC-OFFNXT(G.FMT.07) project code style
478 inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
479 RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromTail()
480 {
481 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromTail";
482 if (UNLIKELY(tail_ == nullptr)) {
483 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List is empty, nothing to pop";
484 return nullptr;
485 }
486 RunSlotsType *tailRunslots = tail_;
487 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " popped from tail RunSlots " << std::hex << tailRunslots;
488 tail_ = tailRunslots->GetPrevRunSlots();
489 if (tail_ == nullptr) {
490 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " Now list is empty";
491 // We pop the last element in the list
492 head_ = nullptr;
493 } else {
494 tail_->SetNextRunSlots(nullptr);
495 }
496 tailRunslots->SetPrevRunSlots(nullptr);
497 return tailRunslots;
498 }
499
500 template <typename AllocConfigT, typename LockConfigT>
501 // CC-OFFNXT(G.FUD.06) perf critical
502 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromList(RunSlotsType *runslots)
503 {
504 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromList RunSlots with addr " << std::hex << runslots;
505 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "head_ = " << std::hex << head_;
506 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "tail_ = " << std::hex << tail_;
507
508 if (runslots == head_) {
509 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots from the head.";
510 PopFromHead();
511 return;
512 }
513 if (runslots == tail_) {
514 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots from the tail.";
515 PopFromTail();
516 return;
517 }
518 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Remove RunSlots from the list.";
519 ASSERT(runslots != nullptr);
520 RunSlotsType *nextRunslots = runslots->GetNextRunSlots();
521 RunSlotsType *previousRunslots = runslots->GetPrevRunSlots();
522 ASSERT(nextRunslots != nullptr);
523 ASSERT(previousRunslots != nullptr);
524
525 nextRunslots->SetPrevRunSlots(previousRunslots);
526 previousRunslots->SetNextRunSlots(nextRunslots);
527 runslots->SetNextRunSlots(nullptr);
528 runslots->SetPrevRunSlots(nullptr);
529 }
530
531 template <typename AllocConfigT, typename LockConfigT>
532 inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::MemPoolManager()
533 {
534 occupiedTail_ = nullptr;
535 freeTail_ = nullptr;
536 partiallyOccupiedHead_ = nullptr;
537 }
538
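// Hands out memory for one RunSlots page: prefer the first partially occupied pool; otherwise take a
// completely free pool from the free list, move it to the occupied list and carve the page from it.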
539 template <typename AllocConfigT, typename LockConfigT>
540 template <bool NEED_LOCK>
541 // CC-OFFNXT(G.FUD.06) perf critical
542 // CC-OFFNXT(G.FMT.07) project code style
543 inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
544 RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::GetNewRunSlots(size_t slotsSize)
545 {
546 os::memory::WriteLockHolder<typename LockConfigT::PoolLock, NEED_LOCK> wlock(lock_);
547 RunSlotsType *newRunslots = nullptr;
548 if (partiallyOccupiedHead_ != nullptr) {
549 newRunslots = partiallyOccupiedHead_->GetMemoryForRunSlots(slotsSize);
550 ASSERT(newRunslots != nullptr);
551 if (UNLIKELY(!partiallyOccupiedHead_->HasMemoryForRunSlots())) {
552 partiallyOccupiedHead_ = partiallyOccupiedHead_->GetNext();
553 ASSERT((partiallyOccupiedHead_ == nullptr) || (partiallyOccupiedHead_->HasMemoryForRunSlots()));
554 }
555 } else if (freeTail_ != nullptr) {
556 LOG_RUNSLOTS_ALLOCATOR(DEBUG)
557 << "MemPoolManager: occupied_tail_ doesn't have memory for RunSlots, get new pool from free pools";
558 PoolListElement *freeElement = freeTail_;
559 freeTail_ = freeTail_->GetPrev();
560
561 freeElement->PopFromList();
562 freeElement->SetPrev(occupiedTail_);
563
564 if (occupiedTail_ != nullptr) {
565 ASSERT(occupiedTail_->GetNext() == nullptr);
566 occupiedTail_->SetNext(freeElement);
567 }
568 occupiedTail_ = freeElement;
569
570 if (partiallyOccupiedHead_ == nullptr) {
571 partiallyOccupiedHead_ = occupiedTail_;
572 ASSERT(partiallyOccupiedHead_->HasMemoryForRunSlots());
573 }
574
575 ASSERT(occupiedTail_->GetNext() == nullptr);
576 newRunslots = occupiedTail_->GetMemoryForRunSlots(slotsSize);
577 ASSERT(newRunslots != nullptr);
578 }
579 return newRunslots;
580 }
581
582 template <typename AllocConfigT, typename LockConfigT>
583 inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::AddNewMemoryPool(void *mem, size_t size)
584 {
585 os::memory::WriteLockHolder wlock(lock_);
586 PoolListElement *newPool = PoolListElement::Create(mem, size, freeTail_);
587 if (freeTail_ != nullptr) {
588 ASSERT(freeTail_->GetNext() == nullptr);
589 freeTail_->SetNext(newPool);
590 }
591 freeTail_ = newPool;
592 ASAN_POISON_MEMORY_REGION(mem, size);
593 // Keep the header unpoisoned so we don't have to unpoison it on every access.
594 ASAN_UNPOISON_MEMORY_REGION(mem, sizeof(PoolListElement));
595 return true;
596 }
597
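// Returns a RunSlots page to its owning pool: the pool is moved back into the partially occupied
// bookkeeping if it was exhausted, the page is marked in the freed-runslots bitmap, and its whole
// memory pages are released to the OS.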
598 template <typename AllocConfigT, typename LockConfigT>
599 // CC-OFFNXT(G.FUD.06) perf critical
600 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::ReturnAndReleaseRunSlotsMemory(
601 RunSlotsType *runslots)
602 {
603 os::memory::WriteLockHolder wlock(lock_);
604 auto pool = static_cast<PoolListElement *>(ToVoidPtr(runslots->GetPoolPointer()));
605 if (!pool->HasMemoryForRunSlots()) {
606 ASSERT(partiallyOccupiedHead_ != pool);
607 // We should move this pool to the end of the occupied list
608 if (pool != occupiedTail_) {
609 pool->PopFromList();
610 pool->SetPrev(occupiedTail_);
611 if (UNLIKELY(occupiedTail_ == nullptr)) {
612 UNREACHABLE();
613 }
614 occupiedTail_->SetNext(pool);
615 occupiedTail_ = pool;
616 } else {
617 ASSERT(partiallyOccupiedHead_ == nullptr);
618 }
619 if (partiallyOccupiedHead_ == nullptr) {
620 partiallyOccupiedHead_ = occupiedTail_;
621 }
622 }
623
624 pool->AddFreedRunSlots(runslots);
625 ASSERT(partiallyOccupiedHead_->HasMemoryForRunSlots());
626
627 // Start address from which we can release pages
628 uintptr_t startAddr = AlignUp(ToUintPtr(runslots), os::mem::GetPageSize());
629 // End address before which we can release pages
630 uintptr_t endAddr = os::mem::AlignDownToPageSize(ToUintPtr(runslots) + RUNSLOTS_SIZE);
631 if (startAddr < endAddr) {
632 os::mem::ReleasePages(startAddr, endAddr);
633 }
634 }
635
636 template <typename AllocConfigT, typename LockConfigT>
637 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IsInMemPools(void *object)
638 {
639 os::memory::ReadLockHolder rlock(lock_);
640 PoolListElement *current = occupiedTail_;
641 while (current != nullptr) {
642 if (current->IsInUsedMemory(object)) {
643 return true;
644 }
645 current = current->GetPrev();
646 }
647 return false;
648 }
649
650 template <typename AllocConfigT, typename LockConfigT>
651 template <typename ObjectVisitor>
652 void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IterateOverObjects(
653 const ObjectVisitor &objectVisitor)
654 {
655 PoolListElement *currentPool = nullptr;
656 {
657 os::memory::ReadLockHolder rlock(lock_);
658 currentPool = occupiedTail_;
659 }
660 while (currentPool != nullptr) {
661 currentPool->IterateOverRunSlots([&currentPool, &objectVisitor](RunSlotsType *runslots) {
662 os::memory::LockHolder runslotsLock(*runslots->GetLock());
663 UNUSED_VAR(currentPool); // For release build
664 ASSERT(runslots->GetPoolPointer() == ToUintPtr(currentPool));
665 runslots->IterateOverOccupiedSlots(objectVisitor);
666 return true;
667 });
668 {
669 os::memory::ReadLockHolder rlock(lock_);
670 currentPool = currentPool->GetPrev();
671 }
672 }
673 }
674
675 template <typename AllocConfigT, typename LockConfigT>
676 template <typename MemVisitor>
677 void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPools(const MemVisitor &memVisitor)
678 {
679 os::memory::WriteLockHolder wlock(lock_);
680 PoolListElement *currentPool = occupiedTail_;
681 while (currentPool != nullptr) {
682 // Use tmp in case the visitor has side effects
683 PoolListElement *tmp = currentPool->GetPrev();
684 memVisitor(currentPool->GetPoolMemory(), currentPool->GetSize());
685 currentPool = tmp;
686 }
687 }
688
689 template <typename AllocConfigT, typename LockConfigT>
690 template <typename MemVisitor>
691 void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPoolsWithOccupiedSize(
692 const MemVisitor &memVisitor)
693 {
694 os::memory::WriteLockHolder wlock(lock_);
695 PoolListElement *currentPool = occupiedTail_;
696 while (currentPool != nullptr) {
697 // Use tmp in case the visitor has side effects
698 PoolListElement *tmp = currentPool->GetPrev();
699 memVisitor(currentPool->GetPoolMemory(), currentPool->GetOccupiedSize(), currentPool->GetSize());
700 currentPool = tmp;
701 }
702 }
703
704 template <typename AllocConfigT, typename LockConfigT>
705 template <typename MemVisitor>
706 void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAndRemoveFreePools(const MemVisitor &memVisitor)
707 {
708 os::memory::WriteLockHolder wlock(lock_);
709 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools inside RunSlotsAllocator";
710 // First, iterate over totally free pools:
711 PoolListElement *currentPool = freeTail_;
712 while (currentPool != nullptr) {
713 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Visit free pool with addr " << std::hex
714 << currentPool->GetPoolMemory() << " and size " << std::dec
715 << currentPool->GetSize();
716 // Use tmp in case the visitor has side effects
717 PoolListElement *tmp = currentPool->GetPrev();
718 memVisitor(currentPool->GetPoolMemory(), currentPool->GetSize());
719 currentPool = tmp;
720 }
721 freeTail_ = nullptr;
722 // Second, try to find free pools among the occupied ones:
723 currentPool = occupiedTail_;
724 while (currentPool != nullptr) {
725 // Use tmp in case the visitor has side effects
726 PoolListElement *tmp = currentPool->GetPrev();
727 if (!currentPool->HasUsedMemory()) {
728 LOG_RUNSLOTS_ALLOCATOR(DEBUG)
729 << "VisitAllFreePools: Visit occupied pool with addr " << std::hex << currentPool->GetPoolMemory()
730 << " and size " << std::dec << currentPool->GetSize();
731 // This Pool doesn't have any occupied memory in RunSlots
732 // Therefore, we can free it
733 if (occupiedTail_ == currentPool) {
734 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Update occupied_tail_";
735 occupiedTail_ = currentPool->GetPrev();
736 }
737 if (currentPool == partiallyOccupiedHead_) {
738 partiallyOccupiedHead_ = partiallyOccupiedHead_->GetNext();
739 ASSERT((partiallyOccupiedHead_ == nullptr) || (partiallyOccupiedHead_->HasMemoryForRunSlots()));
740 }
741 currentPool->PopFromList();
742 memVisitor(currentPool->GetPoolMemory(), currentPool->GetSize());
743 }
744 currentPool = tmp;
745 }
746 }
747
748 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
749 template <typename AllocConfigT, typename LockConfigT>
750 inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PoolListElement()
751 {
752 startMem_ = 0;
753 poolMem_ = 0;
754 size_ = 0;
755 freePtr_ = 0;
756 prevPool_ = nullptr;
757 nextPool_ = nullptr;
758 freededRunslotsCount_ = 0;
759 memset_s(storageForBitmap_.data(), sizeof(BitMapStorageType), 0, sizeof(BitMapStorageType));
760 }
761
762 template <typename AllocConfigT, typename LockConfigT>
763 void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PopFromList()
764 {
765 if (nextPool_ != nullptr) {
766 nextPool_->SetPrev(prevPool_);
767 }
768 if (prevPool_ != nullptr) {
769 prevPool_->SetNext(nextPool_);
770 }
771 nextPool_ = nullptr;
772 prevPool_ = nullptr;
773 }
774
775 template <typename AllocConfigT, typename LockConfigT>
776 uintptr_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFirstRunSlotsBlock(
777 uintptr_t mem)
778 {
779 return AlignUp(mem, 1UL << RUNSLOTS_ALIGNMENT);
780 }
781
782 template <typename AllocConfigT, typename LockConfigT>
783 // CC-OFFNXT(G.FUD.06) perf critical
784 inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::Initialize(
785 void *poolMem, uintptr_t unoccupiedMem, size_t size, PoolListElement *prev)
786 {
787 startMem_ = unoccupiedMem;
788 poolMem_ = ToUintPtr(poolMem);
789 size_ = size;
790 // Atomic with release order reason: data race with free_ptr_ with dependencies on writes before the store which
791 // should become visible to the acquire load
792 freePtr_.store(GetFirstRunSlotsBlock(startMem_), std::memory_order_release);
793 prevPool_ = prev;
794 nextPool_ = nullptr;
795 freededRunslotsCount_ = 0;
796 freedRunslotsBitmap_.ReInitializeMemoryRange(poolMem);
797 ASSERT(freedRunslotsBitmap_.FindFirstMarkedChunks() == nullptr);
798 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which should
799 // become visible
800 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: first free RunSlots block = " << std::hex
801 << freePtr_.load(std::memory_order_acquire);
802 }
803
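// Provides memory for one RunSlots page from this pool: reuse a previously freed page recorded in the
// bitmap if possible, otherwise bump free_ptr_ by RUNSLOTS_SIZE and initialize the new page in place.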
804 template <typename AllocConfigT, typename LockConfigT>
805 // CC-OFFNXT(G.FUD.06) perf critical
806 // CC-OFFNXT(G.FMT.07) project code style
807 inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
808 RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetMemoryForRunSlots(size_t slotsSize)
809 {
810 if (!HasMemoryForRunSlots()) {
811 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: There is no free memory for RunSlots";
812 return nullptr;
813 }
814 RunSlotsType *runslots = GetFreedRunSlots(slotsSize);
815 if (runslots == nullptr) {
816 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
817 // should become visible
818 uintptr_t oldMem = freePtr_.load(std::memory_order_acquire);
819 ASSERT(poolMem_ + size_ >= oldMem + RUNSLOTS_SIZE);
820
821 // Initialize it first, before updating the free pointer,
822 // because it will be visible outside after that.
823 runslots = static_cast<RunSlotsType *>(ToVoidPtr(oldMem));
824 runslots->Initialize(slotsSize, ToUintPtr(this), true);
825 // Atomic with acq_rel order reason: data race with free_ptr_ with dependencies on reads after the load and on
826 // writes before the store
827 freePtr_.fetch_add(RUNSLOTS_SIZE, std::memory_order_acq_rel);
828 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
829 // should become visible
830 ASSERT(freePtr_.load(std::memory_order_acquire) == (oldMem + RUNSLOTS_SIZE));
831 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: Took memory for RunSlots from addr " << std::hex
832 << ToVoidPtr(oldMem)
833 // Atomic with acquire order reason: data race with free_ptr_
834 << ". New first free RunSlots block = "
835 << ToVoidPtr(freePtr_.load(std::memory_order_acquire));
836 }
837 ASSERT(runslots != nullptr);
838 return runslots;
839 }
840
841 template <typename AllocConfigT, typename LockConfigT>
842 template <typename RunSlotsVisitor>
843 void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IterateOverRunSlots(
844 const RunSlotsVisitor &runslotsVisitor)
845 {
846 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating over runslots inside pool with address " << std::hex << poolMem_
847 << " with size " << std::dec << size_ << " bytes";
848 uintptr_t currentRunslot = GetFirstRunSlotsBlock(startMem_);
849 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which should
850 // become visible
851 uintptr_t lastRunslot = freePtr_.load(std::memory_order_acquire);
852 while (currentRunslot < lastRunslot) {
853 ASSERT(startMem_ <= currentRunslot);
854 if (!freedRunslotsBitmap_.AtomicTest(ToVoidPtr(currentRunslot))) {
855 auto curRs = static_cast<RunSlotsType *>(ToVoidPtr(currentRunslot));
856 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating. Process RunSlots " << std::hex << curRs;
857 if (!runslotsVisitor(curRs)) {
858 return;
859 }
860 }
861 currentRunslot += RUNSLOTS_SIZE;
862 }
863 LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating runslots inside this pool finished";
864 }
865
866 template <typename AllocConfigT, typename LockConfigT>
867 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasUsedMemory()
868 {
869 uintptr_t currentRunslot = GetFirstRunSlotsBlock(startMem_);
870 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which should
871 // become visible
872 uintptr_t lastRunslot = freePtr_.load(std::memory_order_acquire);
873 while (currentRunslot < lastRunslot) {
874 ASSERT(startMem_ <= currentRunslot);
875 if (!freedRunslotsBitmap_.AtomicTest(ToVoidPtr(currentRunslot))) {
876 // We have a runslots instance which is in use somewhere.
877 return true;
878 }
879 currentRunslot += RUNSLOTS_SIZE;
880 }
881 return false;
882 }
883
884 template <typename AllocConfigT, typename LockConfigT>
885 size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetOccupiedSize()
886 {
887 if (!IsInitialized()) {
888 return 0;
889 }
890 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which should
891 // become visible
892 return freePtr_.load(std::memory_order_acquire) - poolMem_;
893 }
894
895 template <typename AllocConfigT, typename LockConfigT>
896 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IsInUsedMemory(void *object)
897 {
898 uintptr_t memPointer = startMem_;
899 ASSERT(!((ToUintPtr(object) < GetFirstRunSlotsBlock(memPointer)) && (ToUintPtr(object) >= memPointer)));
900 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which should
901 // become visible
902 bool isInAllocatedMemory = (ToUintPtr(object) < freePtr_.load(std::memory_order_acquire)) &&
903 (ToUintPtr(object) >= GetFirstRunSlotsBlock(memPointer));
904 return isInAllocatedMemory && !IsInFreedRunSlots(object);
905 }
906
907 template <typename AllocConfigT, typename LockConfigT>
908 // CC-OFFNXT(G.FMT.07) project code style
909 typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
910 RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFreedRunSlots(size_t slotsSize)
911 {
912 auto slots = static_cast<RunSlotsType *>(freedRunslotsBitmap_.FindFirstMarkedChunks());
913 if (slots == nullptr) {
914 ASSERT(freededRunslotsCount_ == 0);
915 return nullptr;
916 }
917
918 // Initialize it first, before updating the bitmap,
919 // because it will be visible outside after that.
920 slots->Initialize(slotsSize, ToUintPtr(this), true);
921
922 ASSERT(freededRunslotsCount_ > 0);
923 [[maybe_unused]] bool oldVal = freedRunslotsBitmap_.AtomicTestAndClear(slots);
924 ASSERT(oldVal);
925 freededRunslotsCount_--;
926
927 return slots;
928 }
929
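// A pool can provide a RunSlots page if there is untouched space past free_ptr_ or at least one freed
// page recorded in the bitmap.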
930 template <typename AllocConfigT, typename LockConfigT>
931 bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasMemoryForRunSlots()
932 {
933 if (!IsInitialized()) {
934 return false;
935 }
936 // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which should
937 // become visible
938 bool hasFreeMemory = (freePtr_.load(std::memory_order_acquire) + RUNSLOTS_SIZE) <= (poolMem_ + size_);
939 bool hasFreedRunslots = (freededRunslotsCount_ > 0);
940 ASSERT(hasFreedRunslots == (freedRunslotsBitmap_.FindFirstMarkedChunks() != nullptr));
941 return hasFreeMemory || hasFreedRunslots;
942 }
943
944 #undef LOG_RUNSLOTS_ALLOCATOR
945
946 } // namespace ark::mem
947 #endif // PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H
948