/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H
#define PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H

#include <securec.h>
#include "libpandabase/utils/asan_interface.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/runslots_allocator.h"

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_RUNSLOTS_ALLOCATOR(level) LOG(level, ALLOC) << "RunSlotsAllocator: "

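// Overview (descriptive summary of the code below):
// The allocator carves fixed-size RunSlots pages out of attached memory pools and
// serves each allocation from a page whose slot size is the request rounded up to a
// power of two. Per-size-class lists (runslots_) hold the non-full pages,
// free_runslots_ caches completely empty pages for reuse, and memory_pool_
// (MemPoolManager) hands out fresh pages from the pools.
//
// Usage sketch (config and argument names are illustrative assumptions, not part of this file):
//   RunSlotsAllocator<ObjectAllocConfig, RunSlotsLockConfig> allocator(mem_stats, SpaceType::SPACE_TYPE_OBJECT);
//   allocator.AddMemoryPool(pool_mem, pool_size);  // the pool size is fixed, see AddMemoryPool below
//   void *slot = allocator.Alloc(size, DEFAULT_ALIGNMENT);
//   allocator.Free(slot);
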
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsAllocator(MemStatsType *mem_stats,
                                                                       SpaceType type_allocation)
    : type_allocation_(type_allocation), mem_stats_(mem_stats)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator";
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Initializing RunSlotsAllocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::~RunSlotsAllocator()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator";
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Destroying RunSlotsAllocator finished";
}

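// Allocation algorithm (summary added for readability):
// 1. Round the request up to the alignment and reject sizes above the largest slot.
// 2. Pop a non-full RunSlots page of the matching size class; if there is none,
//    reuse an empty page from free_runslots_ (unless disable_use_free_runslots),
//    or carve a brand-new page out of a memory pool.
// 3. Pop a free slot from the page and re-link the page if it still has room.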
template <typename AllocConfigT, typename LockConfigT>
template <bool need_lock, bool disable_use_free_runslots>
inline void *RunSlotsAllocator<AllocConfigT, LockConfigT>::Alloc(size_t size, Alignment align)
{
    using ListLock = typename LockConfigT::ListLock;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to allocate " << size << " bytes of memory with align " << align;
    if (size == 0) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is zero";
        return nullptr;
    }
    // TODO(aemelenko): Do something more memory-flexible with alignment
    size_t alignment_size = GetAlignmentInBytes(align);
    if (alignment_size > size) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Change size of allocation to " << alignment_size
                                      << " bytes because of alignment";
        size = alignment_size;
    }
    if (size > RunSlotsType::MaxSlotSize()) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate - size of object is too big";
        return nullptr;
    }
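    // The size class is the request rounded up to a power of two; the exponent
    // doubles as the index into the runslots_ array of per-size lists.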
    size_t slot_size_power_of_two = RunSlotsType::ConvertToPowerOfTwoUnsafe(size);
    size_t array_index = slot_size_power_of_two;
    const size_t run_slot_size = 1UL << slot_size_power_of_two;
    RunSlotsType *runslots = nullptr;
    bool used_from_freed_runslots_list = false;
    {
        os::memory::LockHolder<ListLock, need_lock> list_lock(*runslots_[array_index].GetLock());
        runslots = runslots_[array_index].PopFromHead();
    }
    if (runslots == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "We don't have free RunSlots for size " << run_slot_size
                                      << ". Try to get a new one.";
        if (disable_use_free_runslots) {
            return nullptr;
        }
        {
            os::memory::LockHolder<ListLock, need_lock> list_lock(*free_runslots_.GetLock());
            runslots = free_runslots_.PopFromHead();
        }
        if (runslots != nullptr) {
            used_from_freed_runslots_list = true;
            LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Got RunSlots from the free list";
        } else {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "Failed to get new RunSlots from free list, try to allocate one from memory";
            runslots = CreateNewRunSlotsFromMemory(run_slot_size);
            if (runslots == nullptr) {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to allocate an object, couldn't create RunSlots";
                return nullptr;
            }
        }
    }
    void *allocated_mem = nullptr;
    {
        os::memory::LockHolder<typename LockConfigT::RunSlotsLock, need_lock> runslots_lock(*runslots->GetLock());
        if (used_from_freed_runslots_list) {
            // TODO(aemelenko): If we allocate and free two objects of different sizes,
            // we will have a perf issue here. Maybe it is better to delete free_runslots_?
            if (runslots->GetSlotsSize() != run_slot_size) {
                runslots->Initialize(run_slot_size, runslots->GetPoolPointer(), false);
            }
        }
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Used runslots with addr " << std::hex << runslots;
        allocated_mem = static_cast<void *>(runslots->PopFreeSlot());
        if (allocated_mem == nullptr) {
            UNREACHABLE();
        }
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Allocated memory at address " << std::hex << allocated_mem;
        if (!runslots->IsFull()) {
            os::memory::LockHolder<ListLock, need_lock> list_lock(*runslots_[array_index].GetLock());
            // We didn't take the last free slot from this RunSlots
            runslots_[array_index].PushToTail(runslots);
        }
        ASAN_UNPOISON_MEMORY_REGION(allocated_mem, size);
        AllocConfigT::OnAlloc(run_slot_size, type_allocation_, mem_stats_);
        AllocConfigT::MemoryInit(allocated_mem, size);
    }
    return allocated_mem;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Free(void *mem)
{
    FreeUnsafe<true>(mem);
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::ReleaseEmptyRunSlotsPagesUnsafe()
{
    // Iterate over the free_runslots_ list:
    RunSlotsType *cur_free_runslots = nullptr;
    {
        os::memory::LockHolder list_lock(*free_runslots_.GetLock());
        cur_free_runslots = free_runslots_.PopFromHead();
    }
    while (cur_free_runslots != nullptr) {
        memory_pool_.ReturnAndReleaseRunSlotsMemory(cur_free_runslots);

        {
            os::memory::LockHolder list_lock(*free_runslots_.GetLock());
            cur_free_runslots = free_runslots_.PopFromHead();
        }
    }
}

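// Returns a flag telling the caller (FreeUnsafe) whether the RunSlots page became
// empty and was unlinked from its size-class list, in which case the caller must
// push it to free_runslots_. The push is left to the caller so that it happens
// outside the RunSlots lock.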
template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafeInternal(RunSlotsType *runslots, void *mem)
{
    bool need_to_add_to_free_list = false;
    // TODO(aemelenko): There can be a performance issue here when we allocate/deallocate one object repeatedly.
    const size_t run_slot_size = runslots->GetSlotsSize();
    size_t array_index = RunSlotsType::ConvertToPowerOfTwoUnsafe(run_slot_size);
    bool runslots_was_full = runslots->IsFull();
    runslots->PushFreeSlot(static_cast<FreeSlot *>(mem));
    /**
     * RunSlotsAllocator doesn't know the real size used inside the slot, so we record an upper bound - the size
     * of the slot.
     */
    AllocConfigT::OnFree(run_slot_size, type_allocation_, mem_stats_);
    ASAN_POISON_MEMORY_REGION(mem, run_slot_size);
    ASSERT(!(runslots_was_full && runslots->IsEmpty()));  // A RunSlots page has more than one slot inside.
    if (runslots_was_full) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This RunSlots was full and now we must add it to the RunSlots list";

        os::memory::LockHolder list_lock(*runslots_[array_index].GetLock());
#if PANDA_ENABLE_SLOW_DEBUG
        ASSERT(!runslots_[array_index].IsInThisList(runslots));
#endif
        runslots_[array_index].PushToTail(runslots);
    } else if (runslots->IsEmpty()) {
        os::memory::LockHolder list_lock(*runslots_[array_index].GetLock());
        // A concurrent Alloc may have already popped this RunSlots from the list
        // while we were waiting for the lock, so only pop it if it is still linked in.
        if ((runslots->GetNextRunSlots() != nullptr) || (runslots->GetPrevRunSlots() != nullptr) ||
            (runslots_[array_index].GetHead() == runslots)) {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "This RunSlots is empty. Pop it from runslots list and push it to free_runslots_";
            runslots_[array_index].PopFromList(runslots);
            need_to_add_to_free_list = true;
        }
    }

    return need_to_add_to_free_list;
}

template <typename AllocConfigT, typename LockConfigT>
template <bool LockRunSlots>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::FreeUnsafe(void *mem)
{
    if (UNLIKELY(mem == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free memory at invalid addr 0";
        return;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Try to free object at address " << std::hex << mem;
#ifndef NDEBUG
    if (!AllocatedByRunSlotsAllocatorUnsafe(mem)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This object was not allocated by this allocator";
        return;
    }
#endif  // !NDEBUG

    // Now we are 100% sure that this object was allocated by the RunSlots allocator.
    // We can just align this address down and get a pointer to the RunSlots header.
    uintptr_t runslots_addr = (ToUintPtr(mem) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslots_addr));
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots with addr " << std::hex << static_cast<void *>(runslots);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (LockRunSlots) {
        runslots->GetLock()->Lock();
    }

    bool need_to_add_to_free_list = FreeUnsafeInternal(runslots, mem);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (LockRunSlots) {
        runslots->GetLock()->Unlock();
    }

    if (need_to_add_to_free_list) {
        os::memory::LockHolder list_lock(*free_runslots_.GetLock());
        free_runslots_.PushToTail(runslots);
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Freed object at address " << std::hex << mem;
}

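// Collect frees dead objects in place. Note that FreeUnsafe<false> is used below:
// the per-page RunSlots lock is already held by the iteration in
// MemPoolManager::IterateOverObjects, so it must not be taken again here.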
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::Collect(const GCObjectVisitor &death_checker_fn)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator started";
    IterateOverObjects([&](ObjectHeader *object_header) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " iterate over " << std::hex << object_header;
        if (death_checker_fn(object_header) == ObjectStatus::DEAD_OBJECT) {
            LOG(DEBUG, GC) << "DELETE OBJECT " << GetDebugInfoAboutObject(object_header);
            FreeUnsafe<false>(object_header);
        }
    });
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Collecting for RunSlots allocator finished";
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjects(const ObjectVisitor &object_visitor)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects started";
    memory_pool_.IterateOverObjects(object_visitor);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iteration over objects finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocator(void *object)
{
    return AllocatedByRunSlotsAllocatorUnsafe(object);
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocatedByRunSlotsAllocatorUnsafe(void *object)
{
    // TODO(aemelenko): Add a more complex and optimized solution for this method
    return memory_pool_.IsInMemPools(object);
}

template <typename AllocConfigT, typename LockConfigT>
template <bool need_lock>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::CreateNewRunSlotsFromMemory(size_t slots_size)
{
    RunSlotsType *runslots = memory_pool_.template GetNewRunSlots<need_lock>(slots_size);
    if (runslots != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Took " << RUNSLOTS_SIZE << " bytes of memory for a new RunSlots instance at "
                                      << std::hex << runslots;
        return runslots;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "There is no free memory for RunSlots";
    return runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::AddMemoryPool(void *mem, size_t size)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Get new memory pool with size " << size << " bytes, at addr " << std::hex << mem;
    // Try to add this memory to the memory_pool_
    if (mem == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Failed to add memory, the memory is nullptr";
        return false;
    }
    if (size > MIN_POOL_SIZE) {
        // TODO(aemelenko): The size of the pool is fixed for now,
        // because this is required for the correct freed_runslots_bitmap_
        // workflow. Fix it in #4018
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "Can't add new memory pool to this allocator because the memory size exceeds " << MIN_POOL_SIZE;
        return false;
    }
    if (!memory_pool_.AddNewMemoryPool(mem, size)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "Can't add new memory pool to this allocator. Maybe we have already added too many memory pools.";
        return false;
    }
    return true;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveAllPools(const MemVisitor &mem_visitor)
{
    // We call this method to return pools to the system.
    // Therefore, delete all objects to clear all external dependencies.
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Clear all objects inside the allocator";
    memory_pool_.VisitAllPools(mem_visitor);
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::VisitAndRemoveFreePools(const MemVisitor &mem_visitor)
{
    ReleaseEmptyRunSlotsPagesUnsafe();
    // We need to remove the RunSlots from the RunSlots lists.
    // All of them must be inside the free_runslots_ list.
    memory_pool_.VisitAndRemoveFreePools(mem_visitor);
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::IterateOverObjectsInRange(const MemVisitor &mem_visitor,
                                                                             void *left_border, void *right_border)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange for range [" << std::hex << left_border << ", "
                                  << right_border << "]";
    ASSERT(ToUintPtr(right_border) >= ToUintPtr(left_border));
    if (!AllocatedByRunSlotsAllocatorUnsafe(left_border)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "This memory range is not covered by this allocator";
        return;
    }
    // TODO(aemelenko): These are temporary asserts because we can't do anything
    // if the range crosses different allocators' memory pools
    ASSERT(ToUintPtr(right_border) - ToUintPtr(left_border) ==
           (CrossingMapSingleton::GetCrossingMapGranularity() - 1U));
    ASSERT((ToUintPtr(right_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))) ==
           (ToUintPtr(left_border) & (~(CrossingMapSingleton::GetCrossingMapGranularity() - 1U))));
    // Now we are 100% sure that left_border was allocated by the RunSlots allocator.
    // We can just align this address down and get a pointer to the RunSlots header.
    uintptr_t runslots_addr = (ToUintPtr(left_border) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    while (runslots_addr < ToUintPtr(right_border)) {
        auto runslots = static_cast<RunSlotsType *>(ToVoidPtr(runslots_addr));
        os::memory::LockHolder runslots_lock(*runslots->GetLock());
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange, It is RunSlots with addr " << std::hex
                                      << static_cast<void *>(runslots);
        runslots->IterateOverOccupiedSlots(mem_visitor);
        runslots_addr += RUNSLOTS_SIZE;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "IterateOverObjectsInRange finished";
}

template <typename AllocConfigT, typename LockConfigT>
size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::VerifyAllocator()
{
    size_t fail_cnt = 0;
    for (size_t i = 0; i < SLOTS_SIZES_VARIANTS; i++) {
        RunSlotsType *runslots = nullptr;
        {
            os::memory::LockHolder list_lock(*runslots_[i].GetLock());
            runslots = runslots_[i].GetHead();
        }
        if (runslots != nullptr) {
            os::memory::LockHolder runslots_lock(*runslots->GetLock());
            fail_cnt += runslots->VerifyRun();
        }
    }
    return fail_cnt;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::ContainObject(const ObjectHeader *obj)
{
    return AllocatedByRunSlotsAllocatorUnsafe(const_cast<ObjectHeader *>(obj));
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::IsLive(const ObjectHeader *obj)
{
    ASSERT(ContainObject(obj));
    uintptr_t runslots_addr = (ToUintPtr(obj) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT;
    auto run = static_cast<RunSlotsType *>(ToVoidPtr(runslots_addr));
    if (run->IsEmpty()) {
        return false;
    }
    return run->IsLive(obj);
}

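// TrimUnsafe returns unused physical pages to the OS: first the pages backing the
// cached empty RunSlots in free_runslots_, then the untouched tail of every pool
// (everything past the bump pointer, rounded to page granularity).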
template <typename AllocConfigT, typename LockConfigT>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::TrimUnsafe()
{
    // Release the pages of RunSlots in the free list
    auto head = free_runslots_.GetHead();
    while (head != nullptr) {
        auto next = head->GetNextRunSlots();
        os::mem::ReleasePages(ToUintPtr(head), ToUintPtr(head) + RUNSLOTS_SIZE);
        head = next;
    }

    memory_pool_.VisitAllPoolsWithOccupiedSize([](void *mem, size_t used_size, size_t size) {
        uintptr_t start = AlignUp(ToUintPtr(mem) + used_size, panda::os::mem::GetPageSize());
        uintptr_t end = ToUintPtr(mem) + size;
        if (end >= start + panda::os::mem::GetPageSize()) {
            os::mem::ReleasePages(start, end);
        }
    });
}

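// RunSlotsList is an intrusive doubly-linked list: the prev/next links are stored in
// the RunSlots page headers themselves (SetPrevRunSlots/SetNextRunSlots), so list
// operations never allocate.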
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PushToTail(RunSlotsType *runslots)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Push to tail RunSlots at addr " << std::hex << static_cast<void *>(runslots);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " tail_ " << std::hex << tail_;
    if (tail_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List was empty, setup head_ and tail_";
        // This means that head_ == nullptr too
        head_ = runslots;
        tail_ = runslots;
        return;
    }
    tail_->SetNextRunSlots(runslots);
    runslots->SetPrevRunSlots(tail_);
    tail_ = runslots;
    tail_->SetNextRunSlots(nullptr);
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromHead()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromHead";
    if (UNLIKELY(head_ == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List is empty, nothing to pop";
        return nullptr;
    }
    RunSlotsType *head_runslots = head_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " popped from head RunSlots " << std::hex << head_runslots;
    head_ = head_runslots->GetNextRunSlots();
    if (head_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " Now list is empty";
        // We popped the last element in the list
        tail_ = nullptr;
    } else {
        head_->SetPrevRunSlots(nullptr);
    }
    head_runslots->SetNextRunSlots(nullptr);
    return head_runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromTail()
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromTail";
    if (UNLIKELY(tail_ == nullptr)) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " List is empty, nothing to pop";
        return nullptr;
    }
    RunSlotsType *tail_runslots = tail_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " popped from tail RunSlots " << std::hex << tail_runslots;
    tail_ = tail_runslots->GetPrevRunSlots();
    if (tail_ == nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << " Now list is empty";
        // We popped the last element in the list
        head_ = nullptr;
    } else {
        tail_->SetNextRunSlots(nullptr);
    }
    tail_runslots->SetPrevRunSlots(nullptr);
    return tail_runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsList::PopFromList(RunSlotsType *runslots)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PopFromList RunSlots with addr " << std::hex << runslots;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "head_ = " << std::hex << head_;
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "tail_ = " << std::hex << tail_;

    if (runslots == head_) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots from the head.";
        PopFromHead();
        return;
    }
    if (runslots == tail_) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "It is RunSlots from the tail.";
        PopFromTail();
        return;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Remove RunSlots from the list.";
    ASSERT(runslots != nullptr);
    RunSlotsType *next_runslots = runslots->GetNextRunSlots();
    RunSlotsType *previous_runslots = runslots->GetPrevRunSlots();
    ASSERT(next_runslots != nullptr);
    ASSERT(previous_runslots != nullptr);

    next_runslots->SetPrevRunSlots(previous_runslots);
    previous_runslots->SetNextRunSlots(next_runslots);
    runslots->SetNextRunSlots(nullptr);
    runslots->SetPrevRunSlots(nullptr);
}

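// MemPoolManager keeps the attached pools in two intrusive lists: a free list
// (tail at free_tail_) of pools that were never carved up, and an occupied list
// (tail at occupied_tail_) that RunSlots pages are served from.
// partially_occupied_head_ points at the first occupied pool that can still
// provide memory for a new RunSlots page.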
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::MemPoolManager()
{
    occupied_tail_ = nullptr;
    free_tail_ = nullptr;
    partially_occupied_head_ = nullptr;
}

template <typename AllocConfigT, typename LockConfigT>
template <bool need_lock>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::GetNewRunSlots(size_t slots_size)
{
    os::memory::WriteLockHolder<typename LockConfigT::PoolLock, need_lock> wlock(lock_);
    RunSlotsType *new_runslots = nullptr;
    if (partially_occupied_head_ != nullptr) {
        new_runslots = partially_occupied_head_->GetMemoryForRunSlots(slots_size);
        ASSERT(new_runslots != nullptr);
        if (UNLIKELY(!partially_occupied_head_->HasMemoryForRunSlots())) {
            partially_occupied_head_ = partially_occupied_head_->GetNext();
            ASSERT((partially_occupied_head_ == nullptr) || (partially_occupied_head_->HasMemoryForRunSlots()));
        }
    } else if (free_tail_ != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG)
            << "MemPoolManager: occupied_tail_ doesn't have memory for RunSlots, get new pool from free pools";
        PoolListElement *free_element = free_tail_;
        free_tail_ = free_tail_->GetPrev();

        free_element->PopFromList();
        free_element->SetPrev(occupied_tail_);

        if (occupied_tail_ != nullptr) {
            ASSERT(occupied_tail_->GetNext() == nullptr);
            occupied_tail_->SetNext(free_element);
        }
        occupied_tail_ = free_element;

        if (partially_occupied_head_ == nullptr) {
            partially_occupied_head_ = occupied_tail_;
            ASSERT(partially_occupied_head_->HasMemoryForRunSlots());
        }

        ASSERT(occupied_tail_->GetNext() == nullptr);
        new_runslots = occupied_tail_->GetMemoryForRunSlots(slots_size);
        ASSERT(new_runslots != nullptr);
    }
    return new_runslots;
}

template <typename AllocConfigT, typename LockConfigT>
inline bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::AddNewMemoryPool(void *mem, size_t size)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *new_pool = PoolListElement::Create(mem, size, free_tail_);
    if (free_tail_ != nullptr) {
        ASSERT(free_tail_->GetNext() == nullptr);
        free_tail_->SetNext(new_pool);
    }
    free_tail_ = new_pool;
    ASAN_POISON_MEMORY_REGION(mem, size);
    // Keep the PoolListElement header unpoisoned so that we don't need to unpoison it on every access.
    ASAN_UNPOISON_MEMORY_REGION(mem, sizeof(PoolListElement));
    return true;
}

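// Returns a RunSlots page to the freed bitmap of its owning pool. If the pool was
// completely exhausted before this call, it is re-linked to the tail of the occupied
// list so that partially_occupied_head_ can point at it again. The page-aligned
// middle of the RunSlots memory is then returned to the OS.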
template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::ReturnAndReleaseRunSlotsMemory(
    RunSlotsType *runslots)
{
    os::memory::WriteLockHolder wlock(lock_);
    auto pool = static_cast<PoolListElement *>(ToVoidPtr(runslots->GetPoolPointer()));
    if (!pool->HasMemoryForRunSlots()) {
        ASSERT(partially_occupied_head_ != pool);
        // We should move this pool to the end of the occupied list
        if (pool != occupied_tail_) {
            pool->PopFromList();
            pool->SetPrev(occupied_tail_);
            if (UNLIKELY(occupied_tail_ == nullptr)) {
                UNREACHABLE();
            }
            occupied_tail_->SetNext(pool);
            occupied_tail_ = pool;
        } else {
            ASSERT(partially_occupied_head_ == nullptr);
        }
        if (partially_occupied_head_ == nullptr) {
            partially_occupied_head_ = occupied_tail_;
        }
    }

    pool->AddFreedRunSlots(runslots);
    ASSERT(partially_occupied_head_->HasMemoryForRunSlots());

    // Start address from which we can release pages
    uintptr_t start_addr = AlignUp(ToUintPtr(runslots), os::mem::GetPageSize());
    // End address before which we can release pages
    uintptr_t end_addr = os::mem::AlignDownToPageSize(ToUintPtr(runslots) + RUNSLOTS_SIZE);
    if (start_addr < end_addr) {
        os::mem::ReleasePages(start_addr, end_addr);
    }
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IsInMemPools(void *object)
{
    os::memory::ReadLockHolder rlock(lock_);
    PoolListElement *current = occupied_tail_;
    while (current != nullptr) {
        if (current->IsInUsedMemory(object)) {
            return true;
        }
        current = current->GetPrev();
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename ObjectVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::IterateOverObjects(
    const ObjectVisitor &object_visitor)
{
    PoolListElement *current_pool = nullptr;
    {
        os::memory::ReadLockHolder rlock(lock_);
        current_pool = occupied_tail_;
    }
    while (current_pool != nullptr) {
        current_pool->IterateOverRunSlots([&](RunSlotsType *runslots) {
            os::memory::LockHolder runslots_lock(*runslots->GetLock());
            ASSERT(runslots->GetPoolPointer() == ToUintPtr(current_pool));
            runslots->IterateOverOccupiedSlots(object_visitor);
            return true;
        });
        {
            os::memory::ReadLockHolder rlock(lock_);
            current_pool = current_pool->GetPrev();
        }
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPools(const MemVisitor &mem_visitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *current_pool = occupied_tail_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        mem_visitor(current_pool->GetPoolMemory(), current_pool->GetSize());
        current_pool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAllPoolsWithOccupiedSize(
    const MemVisitor &mem_visitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    PoolListElement *current_pool = occupied_tail_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        mem_visitor(current_pool->GetPoolMemory(), current_pool->GetOccupiedSize(), current_pool->GetSize());
        current_pool = tmp;
    }
}

template <typename AllocConfigT, typename LockConfigT>
template <typename MemVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::VisitAndRemoveFreePools(
    const MemVisitor &mem_visitor)
{
    os::memory::WriteLockHolder wlock(lock_);
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools inside RunSlotsAllocator";
    // First, iterate over totally free pools:
    PoolListElement *current_pool = free_tail_;
    while (current_pool != nullptr) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Visit free pool with addr " << std::hex
                                      << current_pool->GetPoolMemory() << " and size " << std::dec
                                      << current_pool->GetSize();
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        mem_visitor(current_pool->GetPoolMemory(), current_pool->GetSize());
        current_pool = tmp;
    }
    free_tail_ = nullptr;
    // Second, try to find free pools among the occupied ones:
    current_pool = occupied_tail_;
    while (current_pool != nullptr) {
        // Use tmp in case the visitor has side effects
        PoolListElement *tmp = current_pool->GetPrev();
        if (!current_pool->HasUsedMemory()) {
            LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                << "VisitAllFreePools: Visit occupied pool with addr " << std::hex << current_pool->GetPoolMemory()
                << " and size " << std::dec << current_pool->GetSize();
            // This pool doesn't have any occupied memory in RunSlots.
            // Therefore, we can free it.
            if (occupied_tail_ == current_pool) {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "VisitAllFreePools: Update occupied_tail_";
                occupied_tail_ = current_pool->GetPrev();
            }
            if (current_pool == partially_occupied_head_) {
                partially_occupied_head_ = partially_occupied_head_->GetNext();
                ASSERT((partially_occupied_head_ == nullptr) || (partially_occupied_head_->HasMemoryForRunSlots()));
            }
            current_pool->PopFromList();
            mem_visitor(current_pool->GetPoolMemory(), current_pool->GetSize());
        }
        current_pool = tmp;
    }
}

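// PoolListElement is the per-pool header and lives at the start of the pool memory
// itself. It hands out RunSlots pages with a bump pointer (free_ptr_) and records
// returned pages in freed_runslots_bitmap_ so they can be reused first.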
template <typename AllocConfigT, typename LockConfigT>
inline RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PoolListElement()
{
    start_mem_ = 0;
    pool_mem_ = 0;
    size_ = 0;
    free_ptr_ = 0;
    prev_pool_ = nullptr;
    next_pool_ = nullptr;
    freeded_runslots_count_ = 0;
    memset_s(storage_for_bitmap_.data(), sizeof(BitMapStorageType), 0, sizeof(BitMapStorageType));
}

template <typename AllocConfigT, typename LockConfigT>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::PopFromList()
{
    if (next_pool_ != nullptr) {
        next_pool_->SetPrev(prev_pool_);
    }
    if (prev_pool_ != nullptr) {
        prev_pool_->SetNext(next_pool_);
    }
    next_pool_ = nullptr;
    prev_pool_ = nullptr;
}

template <typename AllocConfigT, typename LockConfigT>
uintptr_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFirstRunSlotsBlock(
    uintptr_t mem)
{
    return AlignUp(mem, 1UL << RUNSLOTS_ALIGNMENT);
}

template <typename AllocConfigT, typename LockConfigT>
inline void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::Initialize(
    void *pool_mem, uintptr_t unoccupied_mem, size_t size, PoolListElement *prev)
{
    start_mem_ = unoccupied_mem;
    pool_mem_ = ToUintPtr(pool_mem);
    size_ = size;
    // Atomic with release order reason: data race with free_ptr_ with dependencies on writes before the store which
    // should become visible on acquire
    free_ptr_.store(GetFirstRunSlotsBlock(start_mem_), std::memory_order_release);
    prev_pool_ = prev;
    next_pool_ = nullptr;
    freeded_runslots_count_ = 0;
    freed_runslots_bitmap_.ReInitializeMemoryRange(pool_mem);
    ASSERT(freed_runslots_bitmap_.FindFirstMarkedChunks() == nullptr);
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: first free RunSlots block = " << std::hex
                                  << free_ptr_.load(std::memory_order_acquire);
}

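// A new RunSlots page comes from one of two places: a page recorded in the freed
// bitmap is reused first; otherwise the bump pointer free_ptr_ is advanced by
// RUNSLOTS_SIZE.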
template <typename AllocConfigT, typename LockConfigT>
inline typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetMemoryForRunSlots(size_t slots_size)
{
    if (!HasMemoryForRunSlots()) {
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: There is no free memory for RunSlots";
        return nullptr;
    }
    RunSlotsType *runslots = GetFreedRunSlots(slots_size);
    if (runslots == nullptr) {
        // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
        // should become visible
        uintptr_t old_mem = free_ptr_.load(std::memory_order_acquire);
        ASSERT(pool_mem_ + size_ >= old_mem + RUNSLOTS_SIZE);

        // Initialize it first, before updating the free pointer,
        // because it becomes visible outside after that.
        runslots = static_cast<RunSlotsType *>(ToVoidPtr(old_mem));
        runslots->Initialize(slots_size, ToUintPtr(this), true);
        // Atomic with acq_rel order reason: data race with free_ptr_ with dependencies on reads after the load and on
        // writes before the store
        free_ptr_.fetch_add(RUNSLOTS_SIZE, std::memory_order_acq_rel);
        // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
        // should become visible
        ASSERT(free_ptr_.load(std::memory_order_acquire) == (old_mem + RUNSLOTS_SIZE));
        LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "PoolMemory: Took memory for RunSlots from addr " << std::hex
                                      << ToVoidPtr(old_mem)
                                      // Atomic with acquire order reason: data race with free_ptr_
                                      << ". New first free RunSlots block = "
                                      << ToVoidPtr(free_ptr_.load(std::memory_order_acquire));
    }
    ASSERT(runslots != nullptr);
    return runslots;
}

template <typename AllocConfigT, typename LockConfigT>
template <typename RunSlotsVisitor>
void RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IterateOverRunSlots(
    const RunSlotsVisitor &runslots_visitor)
{
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating over runslots inside pool with address " << std::hex << pool_mem_
                                  << " with size " << std::dec << size_ << " bytes";
    uintptr_t current_runslot = GetFirstRunSlotsBlock(start_mem_);
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    uintptr_t last_runslot = free_ptr_.load(std::memory_order_acquire);
    while (current_runslot < last_runslot) {
        ASSERT(start_mem_ <= current_runslot);
        if (!freed_runslots_bitmap_.AtomicTest(ToVoidPtr(current_runslot))) {
            auto cur_rs = static_cast<RunSlotsType *>(ToVoidPtr(current_runslot));
            LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating. Process RunSlots " << std::hex << cur_rs;
            if (!runslots_visitor(cur_rs)) {
                return;
            }
        }
        current_runslot += RUNSLOTS_SIZE;
    }
    LOG_RUNSLOTS_ALLOCATOR(DEBUG) << "Iterating over runslots inside this pool finished";
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasUsedMemory()
{
    uintptr_t current_runslot = GetFirstRunSlotsBlock(start_mem_);
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    uintptr_t last_runslot = free_ptr_.load(std::memory_order_acquire);
    while (current_runslot < last_runslot) {
        ASSERT(start_mem_ <= current_runslot);
        if (!freed_runslots_bitmap_.AtomicTest(ToVoidPtr(current_runslot))) {
            // We have a RunSlots instance which is still in use somewhere.
            return true;
        }
        current_runslot += RUNSLOTS_SIZE;
    }
    return false;
}

template <typename AllocConfigT, typename LockConfigT>
size_t RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetOccupiedSize()
{
    if (!IsInitialized()) {
        return 0;
    }
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    return free_ptr_.load(std::memory_order_acquire) - pool_mem_;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::IsInUsedMemory(void *object)
{
    uintptr_t mem_pointer = start_mem_;
    ASSERT(!((ToUintPtr(object) < GetFirstRunSlotsBlock(mem_pointer)) && (ToUintPtr(object) >= mem_pointer)));
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    bool is_in_allocated_memory = (ToUintPtr(object) < free_ptr_.load(std::memory_order_acquire)) &&
                                  (ToUintPtr(object) >= GetFirstRunSlotsBlock(mem_pointer));
    return is_in_allocated_memory && !IsInFreedRunSlots(object);
}

template <typename AllocConfigT, typename LockConfigT>
typename RunSlotsAllocator<AllocConfigT, LockConfigT>::RunSlotsType *
RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::GetFreedRunSlots(size_t slots_size)
{
    auto slots = static_cast<RunSlotsType *>(freed_runslots_bitmap_.FindFirstMarkedChunks());
    if (slots == nullptr) {
        ASSERT(freeded_runslots_count_ == 0);
        return nullptr;
    }

    // Initialize it first, before updating the bitmap,
    // because it becomes visible outside after that.
    slots->Initialize(slots_size, ToUintPtr(this), true);

    ASSERT(freeded_runslots_count_ > 0);
    [[maybe_unused]] bool old_val = freed_runslots_bitmap_.AtomicTestAndClear(slots);
    ASSERT(old_val);
    freeded_runslots_count_--;

    return slots;
}

template <typename AllocConfigT, typename LockConfigT>
bool RunSlotsAllocator<AllocConfigT, LockConfigT>::MemPoolManager::PoolListElement::HasMemoryForRunSlots()
{
    if (!IsInitialized()) {
        return false;
    }
    // Atomic with acquire order reason: data race with free_ptr_ with dependencies on reads after the load which
    // should become visible
    bool has_free_memory = (free_ptr_.load(std::memory_order_acquire) + RUNSLOTS_SIZE) <= (pool_mem_ + size_);
    bool has_freed_runslots = (freeded_runslots_count_ > 0);
    ASSERT(has_freed_runslots == (freed_runslots_bitmap_.FindFirstMarkedChunks() != nullptr));
    return has_free_memory || has_freed_runslots;
}

#undef LOG_RUNSLOTS_ALLOCATOR

}  // namespace panda::mem
#endif  // PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_INL_H