/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_H
#define PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_H

#include <algorithm>
#include <array>
#include <cstddef>

#include "libpandabase/macros.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/space.h"
#include "libpandabase/utils/logger.h"
#include "runtime/mem/runslots.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/lock_config_helper.h"

namespace ark::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_RUNSLOTS_ALLOCATOR(level) LOG(level, ALLOC) << "RunSlotsAllocator: "

class RunSlotsAllocatorLockConfig {
public:
    class CommonLock {
    public:
        using PoolLock = os::memory::RWLock;
        using ListLock = os::memory::Mutex;
        using RunSlotsLock = RunSlotsLockConfig::CommonLock;
    };

    class DummyLock {
    public:
        using PoolLock = os::memory::DummyLock;
        using ListLock = os::memory::DummyLock;
        using RunSlotsLock = RunSlotsLockConfig::DummyLock;
    };

    template <MTModeT MT_MODE>
    using ParameterizedLock = typename LockConfigHelper<RunSlotsAllocatorLockConfig, MT_MODE>::Value;
};
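
// For illustration only: with LockConfigHelper, a multi-threaded runtime is expected to resolve to the
// real locks and a single-threaded one to the no-op locks (a sketch of how the alias is typically used;
// the exact MT-mode mapping lives in lock_config_helper.h):
//   using MtLock = RunSlotsAllocatorLockConfig::ParameterizedLock<MT_MODE_MULTI>;   // -> CommonLock
//   using StLock = RunSlotsAllocatorLockConfig::ParameterizedLock<MT_MODE_SINGLE>;  // -> DummyLock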

template <typename T, typename AllocConfigT, typename LockConfigT>
class RunSlotsAllocatorAdapter;
enum class InternalAllocatorConfig;
template <InternalAllocatorConfig CONFIG>
class InternalAllocator;

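// A minimal usage sketch for the allocator declared below (illustrative only; memStats, poolMem and
// poolSize are hypothetical values prepared by the caller):
//
//     RunSlotsAllocator<ObjectAllocConfig> allocator(memStats);
//     allocator.AddMemoryPool(poolMem, poolSize);  // poolSize must be at least GetMinPoolSize()
//     void *mem = allocator.Alloc(size);           // size must not exceed GetMaxSize()
//     allocator.Free(mem);
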
/**
 * RunSlotsAllocator is an allocator based on RunSlots instances.
 * It obtains big pools of memory from the OS and uses them to create RunSlots with different slot sizes.
 */
template <typename AllocConfigT, typename LockConfigT = RunSlotsAllocatorLockConfig::CommonLock>
class RunSlotsAllocator {
public:
    explicit RunSlotsAllocator(MemStatsType *memStats, SpaceType typeAllocation = SpaceType::SPACE_TYPE_OBJECT);
    ~RunSlotsAllocator();
    NO_COPY_SEMANTIC(RunSlotsAllocator);
    NO_MOVE_SEMANTIC(RunSlotsAllocator);

    template <typename T, typename... Args>
    [[nodiscard]] T *New(Args &&...args)
    {
        auto p = reinterpret_cast<void *>(Alloc(sizeof(T)));
        if (p == nullptr) {
            // Avoid placement-new on a null pointer when the allocation fails.
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);
        return reinterpret_cast<T *>(p);
    }

    template <typename T>
    [[nodiscard]] T *AllocArray(size_t arrLength);

    template <bool NEED_LOCK = true, bool DISABLE_USE_FREE_RUNSLOTS = false>
    [[nodiscard]] void *Alloc(size_t size, Alignment align = DEFAULT_ALIGNMENT);

    void Free(void *mem);

    void Collect(const GCObjectVisitor &deathCheckerFn);

    bool AddMemoryPool(void *mem, size_t size);
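
    // Caller-side contract sketch for AddMemoryPool (inferred from GetMinPoolSize() and PoolAlign()
    // below; treat this as an assumption rather than a documented guarantee):
    //   ASSERT(size >= GetMinPoolSize());
    //   ASSERT(IsAligned(ToUintPtr(mem), PoolAlign()));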

    /**
     * @brief Iterates over all objects allocated by this allocator.
     * @tparam ObjectVisitor
     * @param objectVisitor - function pointer or functor
     */
    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &objectVisitor);

    /**
     * @brief Iterates over all memory pools used by this allocator
     * and removes them from the allocator structure.
     * NOTE: This method can't be used to clear all internal allocator
     * information and then reuse the allocator somewhere else.
     * @tparam MemVisitor
     * @param memVisitor - function pointer or functor
     */
    template <typename MemVisitor>
    void VisitAndRemoveAllPools(const MemVisitor &memVisitor);

    /**
     * @brief Visits the memory pools of this allocator that can be returned to the system
     * and removes them from the allocator structure.
     * @tparam MemVisitor
     * @param memVisitor - function pointer or functor
     */
    template <typename MemVisitor>
    void VisitAndRemoveFreePools(const MemVisitor &memVisitor);

    /**
     * @brief Iterates over objects in the range [leftBorder, rightBorder] inclusively.
     * @tparam MemVisitor
     * @param memVisitor - function pointer or functor
     * @param leftBorder - a pointer to the first byte of the range
     * @param rightBorder - a pointer to the last byte of the range
     */
    template <typename MemVisitor>
    void IterateOverObjectsInRange(const MemVisitor &memVisitor, void *leftBorder, void *rightBorder);

    RunSlotsAllocatorAdapter<void, AllocConfigT, LockConfigT> Adapter();

    /**
     * @brief Returns the maximum size which can be allocated by this allocator.
     * @return maximum allocation size in bytes
     */
    static constexpr size_t GetMaxSize()
    {
        return RunSlotsType::MaxSlotSize();
    }

    /**
     * @brief Returns the minimum pool size which can be added to this allocator.
     * @return minimum pool size in bytes
     */
    static constexpr size_t GetMinPoolSize()
    {
        return MIN_POOL_SIZE;
    }

    static constexpr size_t PoolAlign()
    {
        return DEFAULT_ALIGNMENT_IN_BYTES;
    }

    size_t VerifyAllocator();

    bool ContainObject(const ObjectHeader *obj);

    bool IsLive(const ObjectHeader *obj);

    static constexpr AllocatorType GetAllocatorType()
    {
        return AllocatorType::RUNSLOTS_ALLOCATOR;
    }

#ifdef PANDA_MEASURE_FRAGMENTATION
    size_t GetAllocatedBytes() const
    {
        // Atomic with relaxed order reason: order is not required
        return allocatedBytes_.load(std::memory_order_relaxed);
    }
#endif

private:
    using RunSlotsType = RunSlots<typename LockConfigT::RunSlotsLock>;

    static constexpr size_t MIN_POOL_SIZE = PANDA_DEFAULT_ALLOCATOR_POOL_SIZE;

    class RunSlotsList {
    public:
        RunSlotsList()
        {
            head_ = nullptr;
            tail_ = nullptr;
        }

        typename LockConfigT::ListLock *GetLock()
        {
            return &lock_;
        }

        RunSlotsType *GetHead()
        {
            return head_;
        }

        RunSlotsType *GetTail()
        {
            return tail_;
        }

        void PushToTail(RunSlotsType *runslots);

        RunSlotsType *PopFromHead();

        RunSlotsType *PopFromTail();

        void PopFromList(RunSlotsType *runslots);

        bool IsInThisList(RunSlotsType *runslots)
        {
            RunSlotsType *current = head_;
            while (current != nullptr) {
                if (current == runslots) {
                    return true;
                }
                current = current->GetNextRunSlots();
            }
            return false;
        }

        ~RunSlotsList() = default;

        NO_COPY_SEMANTIC(RunSlotsList);
        NO_MOVE_SEMANTIC(RunSlotsList);

    private:
        RunSlotsType *head_;
        RunSlotsType *tail_;
        typename LockConfigT::ListLock lock_;
    };

    /**
     * MemPoolManager manages the memory which the allocator obtains from the OS.
     * The current implementation limits the number of memory pools that this class can manage.
     */

    // MemPoolManager structure:
    //
    // partiallyOccupiedHead_ - a pointer to the first partially occupied pool in the occupied list
    //     |
    //     |       occupiedTail_ - a pointer to the last occupied pool
    //     |                 |
    //     | part occupied   |
    //     v     pools       v
    // |x|x|x|x|x|x|x|x|x|x|x|x|x|x|x|
    // ^                             ^
    // |        occupied pools       |
    //
    // freeTail_ - a pointer to the last totally free pool.
    //                   |
    //                   |
    //                   v
    // |0|0|0|0|0|0|0|0|0|0|
    // ^                   ^
    // |     free pools    |
    //
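
    // Life-cycle sketch (as suggested by the fields below, not a stated invariant): a pool enters the
    // occupied list once RunSlots are carved from it, and fully free pools gather at freeTail_, where
    // VisitAndRemoveFreePools() can return them to the system.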

    class MemPoolManager {
    public:
        explicit MemPoolManager();

        template <bool NEED_LOCK = true>
        RunSlotsType *GetNewRunSlots(size_t slotsSize);

        bool AddNewMemoryPool(void *mem, size_t size);

        template <typename ObjectVisitor>
        void IterateOverObjects(const ObjectVisitor &objectVisitor);

        template <typename MemVisitor>
        void VisitAllPools(const MemVisitor &memVisitor);

        template <typename MemVisitor>
        void VisitAllPoolsWithOccupiedSize(const MemVisitor &memVisitor);

        template <typename MemVisitor>
        void VisitAndRemoveFreePools(const MemVisitor &memVisitor);

        void ReturnAndReleaseRunSlotsMemory(RunSlotsType *runslots);

        bool IsInMemPools(void *object);

        ~MemPoolManager() = default;

        NO_COPY_SEMANTIC(MemPoolManager);
        NO_MOVE_SEMANTIC(MemPoolManager);

    private:
        class PoolListElement {
        public:
            PoolListElement();

            void Initialize(void *poolMem, uintptr_t unoccupiedMem, size_t size, PoolListElement *prev);

            static PoolListElement *Create(void *mem, size_t size, PoolListElement *prev)
            {
                LOG_RUNSLOTS_ALLOCATOR(DEBUG)
                    << "PoolMemory: Create new instance with size " << size << " bytes at addr " << std::hex << mem;
                ASSERT(mem != nullptr);
                ASSERT(sizeof(PoolListElement) <= RUNSLOTS_SIZE);
                ASAN_UNPOISON_MEMORY_REGION(mem, sizeof(PoolListElement));
                auto newElement = new (mem) PoolListElement();
                uintptr_t unoccupiedMem = AlignUp(ToUintPtr(mem) + sizeof(PoolListElement), RUNSLOTS_SIZE);
                ASSERT(unoccupiedMem < ToUintPtr(mem) + size);
                newElement->Initialize(mem, unoccupiedMem, size, prev);
                return newElement;
            }

            bool HasMemoryForRunSlots();

            bool IsInitialized()
            {
                return startMem_ != 0;
            }

            RunSlotsType *GetMemoryForRunSlots(size_t slotsSize);

            template <typename RunSlotsVisitor>
            void IterateOverRunSlots(const RunSlotsVisitor &runslotsVisitor);

            bool HasUsedMemory();

            size_t GetOccupiedSize();

            bool IsInUsedMemory(void *object);

            void *GetPoolMemory()
            {
                return ToVoidPtr(poolMem_);
            }

            size_t GetSize()
            {
                return size_;
            }

            PoolListElement *GetNext() const
            {
                return nextPool_;
            }

            PoolListElement *GetPrev() const
            {
                return prevPool_;
            }

            void SetPrev(PoolListElement *prev)
            {
                prevPool_ = prev;
            }

            void SetNext(PoolListElement *next)
            {
                nextPool_ = next;
            }

            void PopFromList();

            void AddFreedRunSlots(RunSlotsType *slots)
            {
                [[maybe_unused]] bool oldVal = freedRunslotsBitmap_.AtomicTestAndSet(slots);
                ASSERT(!oldVal);
                freededRunslotsCount_++;
                ASAN_POISON_MEMORY_REGION(slots, RUNSLOTS_SIZE);
            }

            bool IsInFreedRunSlots(void *addr)
            {
                void *alignAddr = ToVoidPtr((ToUintPtr(addr) >> RUNSLOTS_ALIGNMENT) << RUNSLOTS_ALIGNMENT);
                return freedRunslotsBitmap_.TestIfAddrValid(alignAddr);
            }

            size_t GetFreedRunSlotsCount()
            {
                return freededRunslotsCount_;
            }

            ~PoolListElement() = default;

            NO_COPY_SEMANTIC(PoolListElement);
            NO_MOVE_SEMANTIC(PoolListElement);

        private:
            using MemBitmapClass = MemBitmap<RUNSLOTS_SIZE, uintptr_t>;
            using BitMapStorageType = std::array<uint8_t, MemBitmapClass::GetBitMapSizeInByte(MIN_POOL_SIZE)>;

            uintptr_t GetFirstRunSlotsBlock(uintptr_t mem);

            RunSlotsType *GetFreedRunSlots(size_t slotsSize);

            uintptr_t poolMem_;
            uintptr_t startMem_;
            std::atomic<uintptr_t> freePtr_;
            size_t size_;
            PoolListElement *nextPool_;
            PoolListElement *prevPool_;
            size_t freededRunslotsCount_;
            BitMapStorageType storageForBitmap_;
            MemBitmapClass freedRunslotsBitmap_ {nullptr, MIN_POOL_SIZE, storageForBitmap_.data()};
        };

        PoolListElement *freeTail_;
        PoolListElement *partiallyOccupiedHead_;
        PoolListElement *occupiedTail_;
        typename LockConfigT::PoolLock lock_;
    };

    void ReleaseEmptyRunSlotsPagesUnsafe();

    template <bool LOCK_RUN_SLOTS>
    void FreeUnsafe(void *mem);

    bool FreeUnsafeInternal(RunSlotsType *runslots, void *mem);

    void TrimUnsafe();

    // Returns true if this object could have been allocated by the RunSlots allocator.
    // Does not check any live-objects bitmap inside.
    bool AllocatedByRunSlotsAllocator(void *object);

    bool AllocatedByRunSlotsAllocatorUnsafe(void *object);

    template <bool NEED_LOCK = true>
    RunSlotsType *CreateNewRunSlotsFromMemory(size_t slotsSize);

    // Add one to the array size so that a slot size (a power of two) can be used as an index
    // into the RunSlots list without any adjustment.
    static constexpr size_t SLOTS_SIZES_VARIANTS = RunSlotsType::SlotSizesVariants() + 1;

    std::array<RunSlotsList, SLOTS_SIZES_VARIANTS> runslots_;
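
    // Size-class sketch (an illustration of the indexing scheme implied by the comment above): a
    // 96-byte request is rounded up to the next power-of-two slot size, e.g. 128 bytes, and is served
    // from the RunSlotsList that runslots_ keeps for that size class.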

    // Totally free RunSlots are kept in this list so that they can be reused for different slot sizes.
    RunSlotsList freeRunslots_;

    MemPoolManager memoryPool_;
    SpaceType typeAllocation_;

    MemStatsType *memStats_;

#ifdef PANDA_MEASURE_FRAGMENTATION
    std::atomic_size_t allocatedBytes_ {0};
#endif

    template <typename T>
    friend class PygoteSpaceAllocator;
    friend class RunSlotsAllocatorTest;
    template <InternalAllocatorConfig CONFIG>
    friend class InternalAllocator;
};

template <typename AllocConfigT, typename LockConfigT>
template <typename T>
T *RunSlotsAllocator<AllocConfigT, LockConfigT>::AllocArray(size_t arrLength)
{
    // NOTE(aemelenko): Very dirty hack. If you want to fix it, you must change RunSlotsAllocatorAdapter::max_size() too
    return static_cast<T *>(Alloc(sizeof(T) * arrLength));
}
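
// Usage caution (an editorial sketch, not enforced by the code above): the product sizeof(T) * arrLength
// can overflow, and a request larger than GetMaxSize() is expected to make Alloc fail:
//   auto *arr = allocator.AllocArray<uint32_t>(len);  // meaningful only if sizeof(uint32_t) * len <= GetMaxSize()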

#undef LOG_RUNSLOTS_ALLOCATOR

}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_RUNSLOTS_ALLOCATOR_H