1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
#include <sys/mman.h>

#include <array>
#include <cmath>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

#include "libpandabase/mem/mem.h"
#include "libpandabase/os/mem.h"
#include "libpandabase/utils/asan_interface.h"
#include "libpandabase/utils/logger.h"
#include "libpandabase/utils/math_helpers.h"
#include "runtime/include/runtime.h"
#include "runtime/mem/alloc_config.h"
#include "runtime/mem/freelist_allocator-inl.h"
#include "runtime/tests/allocator_test_base.h"
27
28 namespace panda::mem {
29
30 using NonObjectFreeListAllocator = FreeListAllocator<EmptyAllocConfigWithCrossingMap>;
31
// Test fixture for FreeListAllocator with an empty (non-object) allocation config.
// Each test gets a freshly created runtime: the PoolManager and the CrossingMap
// used by the allocator are provided by the runtime instance.
class FreeListAllocatorTest : public AllocatorTest<NonObjectFreeListAllocator> {
public:
    FreeListAllocatorTest()
    {
        // Logger::InitializeStdLogging(Logger::Level::DEBUG, Logger::Component::ALL);
        // We need to create a runtime instance to be able to use CrossingMap.
        options_.SetShouldLoadBootPandaFiles(false);
        options_.SetShouldInitializeIntrinsics(false);
        Runtime::Create(options_);
        thread_ = panda::MTManagedThread::GetCurrent();
        thread_->ManagedCodeBegin();
        // If the runtime did not create the CrossingMap itself, create it here and
        // remember that this fixture owns it, so the destructor tears it down.
        if (!CrossingMapSingleton::IsCreated()) {
            CrossingMapSingleton::Create();
            crossingmap_manual_handling_ = true;
        }
    }

    ~FreeListAllocatorTest()
    {
        // Teardown order matters: leave managed code, return the pools to the
        // PoolManager, destroy the CrossingMap (only if we created it in the
        // constructor), and finally destroy the runtime itself.
        thread_->ManagedCodeEnd();
        ClearPoolManager();
        if (crossingmap_manual_handling_) {
            CrossingMapSingleton::Destroy();
        }
        Runtime::Destroy();
        // Logger::Destroy();
    }

protected:
    panda::MTManagedThread *thread_;  // current managed thread, captured in the constructor
    static constexpr size_t DEFAULT_POOL_SIZE_FOR_ALLOC = NonObjectFreeListAllocator::GetMinPoolSize();
    static constexpr size_t DEFAULT_POOL_ALIGNMENT_FOR_ALLOC = FREELIST_DEFAULT_ALIGNMENT;
    static constexpr size_t POOL_HEADER_SIZE = sizeof(NonObjectFreeListAllocator::MemoryPoolHeader);
    static constexpr size_t MAX_ALLOC_SIZE = NonObjectFreeListAllocator::GetMaxSize();

    // Allocates one pool from the runtime's PoolManager, records it for later
    // cleanup, and hands it to the allocator. Thread-safe via pool_lock_.
    void AddMemoryPoolToAllocator(NonObjectFreeListAllocator &alloc) override
    {
        os::memory::LockHolder lock(pool_lock_);
        Pool pool = PoolManager::GetMmapMemPool()->AllocPool(DEFAULT_POOL_SIZE_FOR_ALLOC, SpaceType::SPACE_TYPE_OBJECT,
                                                             AllocatorType::FREELIST_ALLOCATOR, &alloc);
        ASSERT(pool.GetSize() == DEFAULT_POOL_SIZE_FOR_ALLOC);
        if (pool.GetMem() == nullptr) {
            ASSERT_TRUE(0 && "Can't get a new pool from PoolManager");
        }
        allocated_pools_by_pool_manager_.push_back(pool);
        if (!alloc.AddMemoryPool(pool.GetMem(), pool.GetSize())) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

    void AddMemoryPoolToAllocatorProtected(NonObjectFreeListAllocator &alloc) override
    {
        // We use common PoolManager from Runtime. Therefore, we have the same pool allocation for both cases.
        AddMemoryPoolToAllocator(alloc);
    }

    // Delegates the ownership check to the allocator itself.
    bool AllocatedByThisAllocator(NonObjectFreeListAllocator &allocator, void *mem) override
    {
        return allocator.AllocatedByFreeListAllocator(mem);
    }

    // Returns every pool obtained via AddMemoryPoolToAllocator back to the
    // PoolManager; optionally also unregisters them from the CrossingMap
    // (needed when tests will reuse the same address ranges).
    void ClearPoolManager(bool clear_crossing_map = false)
    {
        for (auto i : allocated_pools_by_pool_manager_) {
            PoolManager::GetMmapMemPool()->FreePool(i.GetMem(), i.GetSize());
            if (clear_crossing_map) {
                // We need to remove corresponding Pools from the CrossingMap
                CrossingMapSingleton::RemoveCrossingMapForMemory(i.GetMem(), i.GetSize());
            }
        }
        allocated_pools_by_pool_manager_.clear();
    }

    std::vector<Pool> allocated_pools_by_pool_manager_;  // pools to return on teardown
    RuntimeOptions options_;
    // True if the fixture (not the runtime) created the CrossingMap singleton.
    bool crossingmap_manual_handling_ {false};
    // Mutex, which allows only one thread to add pool to the pool vector
    os::memory::Mutex pool_lock_;
};
111
/**
 * Allocates objects of many different sizes from a single pool and checks that
 * the allocator survives the mix. Allocation failure (nullptr) is tolerated:
 * this exercises size handling, not pool capacity.
 */
TEST_F(FreeListAllocatorTest, SimpleAllocateDifferentObjSizeTest)
{
    LOG(DEBUG, ALLOC) << "SimpleAllocateDifferentObjSizeTest";
    // unique_ptr is declared before the allocator so mem_stats outlives it.
    // The original raw new/delete freed mem_stats *before* the allocator's
    // destructor ran, and leaked it if the test body exited early.
    auto mem_stats = std::make_unique<mem::MemStatsType>();
    NonObjectFreeListAllocator allocator(mem_stats.get());
    AddMemoryPoolToAllocator(allocator);
    static constexpr size_t MIN_OBJ_SIZE = 23;
    static constexpr size_t OBJ_SIZE_LIMIT = 300;
    for (size_t i = MIN_OBJ_SIZE; i < OBJ_SIZE_LIMIT; i++) {
        void *mem = allocator.Alloc(i);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << i << " at " << std::hex << mem;
    }
}
124
// Allocate, write a byte pattern, verify and free ELEMENTS_COUNT minimum-size blocks.
TEST_F(FreeListAllocatorTest, AllocateWriteFreeTest)
{
    static constexpr size_t ALLOC_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t ELEMENTS_COUNT = 512;
    AllocateAndFree(ALLOC_SIZE, ELEMENTS_COUNT);
}
129
// Allocate blocks of random sizes in [min, 2*min] and free them in random order.
TEST_F(FreeListAllocatorTest, AllocateRandomFreeTest)
{
    static constexpr size_t MIN_RANDOM_ALLOC_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t MAX_RANDOM_ALLOC_SIZE = 2 * MIN_RANDOM_ALLOC_SIZE;
    static constexpr size_t ELEMENTS_COUNT = 512;
    static constexpr size_t POOLS_COUNT = 1;
    AllocateFreeDifferentSizesTest<MIN_RANDOM_ALLOC_SIZE, MAX_RANDOM_ALLOC_SIZE>(ELEMENTS_COUNT, POOLS_COUNT);
}
137
// A request one byte beyond the allocator's maximum must fail (return nullptr).
TEST_F(FreeListAllocatorTest, AllocateTooBigObjTest)
{
    static constexpr size_t TOO_BIG_OBJ_SIZE = MAX_ALLOC_SIZE + 1;
    AllocateTooBigObjectTest<TOO_BIG_OBJ_SIZE>();
}
142
// Aligned allocations over a range of sizes and alignments, spread over two pools.
TEST_F(FreeListAllocatorTest, AlignmentAllocTest)
{
    static constexpr size_t MIN_ALIGNED_ALLOC_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t MAX_ALIGNED_ALLOC_SIZE = MAX_ALLOC_SIZE / 4096;
    static constexpr size_t POOLS_COUNT = 2;
    AlignedAllocFreeTest<MIN_ALIGNED_ALLOC_SIZE, MAX_ALIGNED_ALLOC_SIZE>(POOLS_COUNT);
}
148
// Request more minimum-size elements than one pool can hold; the surplus must fail cleanly.
TEST_F(FreeListAllocatorTest, AllocateTooMuchTest)
{
    static constexpr size_t ALLOC_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t ELEMENTS_COUNT = DEFAULT_POOL_SIZE_FOR_ALLOC / ALLOC_SIZE;
    AllocateTooMuchTest(ALLOC_SIZE, ELEMENTS_COUNT);
}
154
// Iterate over all live objects across the allocator's full size range.
TEST_F(FreeListAllocatorTest, ObjectIteratorTest)
{
    static constexpr size_t MIN_ITER_OBJ_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t MAX_ITER_OBJ_SIZE = MAX_ALLOC_SIZE;
    ObjectIteratorTest<MIN_ITER_OBJ_SIZE, MAX_ITER_OBJ_SIZE>();
}
159
// Exercise the GC-style Collect path over the allocator's full size range.
TEST_F(FreeListAllocatorTest, ObjectCollectionTest)
{
    static constexpr size_t MIN_COLLECT_OBJ_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t MAX_COLLECT_OBJ_SIZE = MAX_ALLOC_SIZE;
    ObjectCollectionTest<MIN_COLLECT_OBJ_SIZE, MAX_COLLECT_OBJ_SIZE>();
}
164
// Iterate over objects restricted to address ranges at CrossingMap granularity.
TEST_F(FreeListAllocatorTest, ObjectIteratorInRangeTest)
{
    static constexpr size_t MIN_RANGE_OBJ_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t MAX_RANGE_OBJ_SIZE = MAX_ALLOC_SIZE;
    const size_t range_granularity = CrossingMapSingleton::GetCrossingMapGranularity();
    ObjectIteratorInRangeTest<MIN_RANGE_OBJ_SIZE, MAX_RANGE_OBJ_SIZE>(range_granularity);
}
170
// Checks ASAN poisoning/unpoisoning of allocator memory (base-class helper).
TEST_F(FreeListAllocatorTest, AsanTest)
{
    AsanTest();
}
175
// Fill several pools, free everything, and verify fully-free pools are visited and released.
TEST_F(FreeListAllocatorTest, VisitAndRemoveFreePoolsTest)
{
    static constexpr size_t FREE_POOLS_COUNT = 5;
    VisitAndRemoveFreePools<FREE_POOLS_COUNT>(MAX_ALLOC_SIZE);
}
181
// Checks AllocatedByFreeListAllocator ownership queries for both owned and foreign memory.
TEST_F(FreeListAllocatorTest, AllocatedByFreeListAllocatorTest)
{
    AllocatedByThisAllocatorTest();
}
186
/**
 * Regression test for free-list link consistency. Three consecutive blocks are
 * allocated; the middle and then the first are freed (coalescing them into one
 * free region), the hole is refilled with a double-sized block plus a normal
 * block, and finally every block is verified and freed. If the allocator loses
 * a free-list link during coalescing/splitting, verification fails.
 */
TEST_F(FreeListAllocatorTest, FailedLinksTest)
{
    static constexpr size_t MIN_ALLOC_SIZE = FREELIST_ALLOCATOR_MIN_SIZE;
    static constexpr size_t ELEMENTS_COUNT = 3;
    static constexpr size_t DOUBLE_ALLOC_SIZE = MIN_ALLOC_SIZE * 2;
    // unique_ptr declared before the allocator so mem_stats outlives it (the
    // original raw new/delete freed mem_stats while the allocator was alive,
    // and leaked it if any ASSERT aborted the test body).
    auto mem_stats = std::make_unique<mem::MemStatsType>();
    NonObjectFreeListAllocator allocator(mem_stats.get());
    AddMemoryPoolToAllocator(allocator);

    // Each element holds {memory, byte-pattern index} for later verification.
    std::array<std::pair<void *, size_t>, ELEMENTS_COUNT> memory_elements;
    for (size_t i = 0; i < ELEMENTS_COUNT; i++) {
        void *mem = allocator.Alloc(MIN_ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        size_t index = SetBytesFromByteArray(mem, MIN_ALLOC_SIZE);
        memory_elements.at(i) = std::make_pair(mem, index);
    }

    // Free the middle block, then the first one - the allocator coalesces the
    // two neighbouring free blocks into a single larger free region.
    {
        auto pair = memory_elements[1];
        ASSERT_TRUE(CompareBytesWithByteArray(pair.first, MIN_ALLOC_SIZE, pair.second));
        allocator.Free(pair.first);
    }
    {
        auto pair = memory_elements[0];
        ASSERT_TRUE(CompareBytesWithByteArray(pair.first, MIN_ALLOC_SIZE, pair.second));
        allocator.Free(pair.first);
    }

    // Refill the coalesced hole: one double-sized block and one normal block.
    {
        void *mem = allocator.Alloc(DOUBLE_ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        memory_elements.at(0) = std::make_pair(mem, SetBytesFromByteArray(mem, DOUBLE_ALLOC_SIZE));
    }
    {
        void *mem = allocator.Alloc(MIN_ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        memory_elements.at(1) = std::make_pair(mem, SetBytesFromByteArray(mem, MIN_ALLOC_SIZE));
    }

    // Verify and free every block; only element 0 is double-sized.
    for (size_t i = 0; i < ELEMENTS_COUNT; i++) {
        size_t size = (i == 0) ? DOUBLE_ALLOC_SIZE : MIN_ALLOC_SIZE;
        auto pair = memory_elements[i];
        ASSERT_TRUE(CompareBytesWithByteArray(pair.first, size, pair.second));
        allocator.Free(pair.first);
    }
}
247
/**
 * Checks that the allocator can serve the maximum supported allocation size
 * more than once from a single pool and free those blocks again.
 */
TEST_F(FreeListAllocatorTest, MaxAllocationSizeTest)
{
    static constexpr size_t ALLOC_SIZE = MAX_ALLOC_SIZE;
    static constexpr size_t ALLOC_COUNT = 2;
    // unique_ptr declared before the allocator so mem_stats outlives it (the
    // original raw new/delete freed mem_stats while the allocator was alive,
    // and leaked it if an ASSERT aborted the test body).
    auto mem_stats = std::make_unique<mem::MemStatsType>();
    NonObjectFreeListAllocator allocator(mem_stats.get());
    AddMemoryPoolToAllocator(allocator);
    std::array<void *, ALLOC_COUNT> memory_elements {};
    for (size_t i = 0; i < ALLOC_COUNT; i++) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        memory_elements.at(i) = mem;
    }
    for (size_t i = 0; i < ALLOC_COUNT; i++) {
        allocator.Free(memory_elements.at(i));
    }
}
266
/**
 * Fills an entire pool with power-of-two-footprint blocks, frees everything,
 * and verifies the same number of blocks can be allocated again - i.e. freeing
 * returned the whole pool to the free list without fragmentation loss.
 */
TEST_F(FreeListAllocatorTest, AllocateTheWholePoolFreeAndAllocateAgainTest)
{
    // Find the smallest power of two whose payload (2^k - block header) still
    // satisfies the allocator's minimum allocation size.
    size_t min_size_power_of_two;
    if ((FREELIST_ALLOCATOR_MIN_SIZE & (FREELIST_ALLOCATOR_MIN_SIZE - 1)) == 0U) {
        min_size_power_of_two = panda::helpers::math::GetIntLog2(FREELIST_ALLOCATOR_MIN_SIZE);
    } else {
        min_size_power_of_two = ceil(std::log(FREELIST_ALLOCATOR_MIN_SIZE) / std::log(2));
    }
    // Shift a size_t, not an int literal, to avoid overflow/UB for large powers.
    if (((static_cast<size_t>(1) << min_size_power_of_two) - sizeof(freelist::MemoryBlockHeader)) <
        FREELIST_ALLOCATOR_MIN_SIZE) {
        min_size_power_of_two++;
    }
    size_t alloc_size = (static_cast<size_t>(1) << min_size_power_of_two) - sizeof(freelist::MemoryBlockHeader);
    // To cover all memory we need to consider pool header size at first bytes of pool memory.
    size_t first_alloc_size =
        (static_cast<size_t>(1) << min_size_power_of_two) - sizeof(freelist::MemoryBlockHeader) - POOL_HEADER_SIZE;
    if (first_alloc_size < FREELIST_ALLOCATOR_MIN_SIZE) {
        first_alloc_size = (static_cast<size_t>(1) << (min_size_power_of_two + 1)) -
                           sizeof(freelist::MemoryBlockHeader) - POOL_HEADER_SIZE;
    }
    // unique_ptr declared before the allocator so mem_stats outlives it (the
    // original raw new/delete freed mem_stats while the allocator was alive).
    auto mem_stats = std::make_unique<mem::MemStatsType>();
    NonObjectFreeListAllocator allocator(mem_stats.get());
    AddMemoryPoolToAllocator(allocator);
    std::vector<void *> memory_elements;
    size_t alloc_count = 0;

    // Allocate first element
    void *first_alloc_mem = allocator.Alloc(first_alloc_size);
    ASSERT_TRUE(first_alloc_mem != nullptr);

    // Allocate and use the whole alloc pool; nullptr signals the pool is full.
    while (true) {
        void *mem = allocator.Alloc(alloc_size);
        if (mem == nullptr) {
            break;
        }
        alloc_count++;
        memory_elements.push_back(mem);
    }

    // Free all elements
    allocator.Free(first_alloc_mem);
    for (size_t i = 0; i < alloc_count; i++) {
        allocator.Free(memory_elements.back());
        memory_elements.pop_back();
    }

    // Allocate first element again
    first_alloc_mem = allocator.Alloc(first_alloc_size);
    ASSERT_TRUE(first_alloc_mem != nullptr);

    // Allocate again - every previously-freed slot must be available once more.
    for (size_t i = 0; i < alloc_count; i++) {
        void *mem = allocator.Alloc(alloc_size);
        ASSERT_TRUE(mem != nullptr);
        memory_elements.push_back(mem);
    }

    // Free all elements again
    allocator.Free(first_alloc_mem);
    for (size_t i = 0; i < alloc_count; i++) {
        allocator.Free(memory_elements.back());
        memory_elements.pop_back();
    }
}
330
/**
 * Multithreaded stress test: THREADS_COUNT threads concurrently allocate
 * between MIN_ELEMENTS_COUNT and MAX_ELEMENTS_COUNT objects and free them.
 * The whole scenario runs MT_TEST_RUN_COUNT times; pools are returned to the
 * PoolManager (and unregistered from the CrossingMap) between runs.
 */
TEST_F(FreeListAllocatorTest, MTAllocFreeTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 1000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MAX_MT_ALLOC_SIZE = MAX_ALLOC_SIZE / 128;
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    // Threads can concurrently add Pools to the allocator, therefore, we must make it into account
    // And also we must take fragmentation into account
    ASSERT_TRUE(mem::MemConfig::GetHeapSizeLimit() >
                2 * (AlignUp(MAX_ELEMENTS_COUNT * MAX_MT_ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC)) +
                THREADS_COUNT * DEFAULT_POOL_SIZE_FOR_ALLOC);
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MT_AllocFreeTest<FREELIST_ALLOCATOR_MIN_SIZE, MAX_MT_ALLOC_SIZE, THREADS_COUNT>(MIN_ELEMENTS_COUNT,
                                                                                        MAX_ELEMENTS_COUNT);
        // true: also remove freed pools from the CrossingMap so the next run
        // starts from a clean state.
        ClearPoolManager(true);
    }
}
354
/**
 * Multithreaded test combining concurrent allocation with object iteration
 * over address ranges (at CrossingMap granularity). Runs MT_TEST_RUN_COUNT
 * times; pools are returned to the PoolManager (and unregistered from the
 * CrossingMap) between runs.
 */
TEST_F(FreeListAllocatorTest, MTAllocIterateTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 1000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MAX_MT_ALLOC_SIZE = MAX_ALLOC_SIZE / 128;
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    // Threads can concurrently add Pools to the allocator, therefore, we must make it into account
    // And also we must take fragmentation into account
    ASSERT_TRUE(mem::MemConfig::GetHeapSizeLimit() >
                2 * (AlignUp(MAX_ELEMENTS_COUNT * MAX_MT_ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC)) +
                THREADS_COUNT * DEFAULT_POOL_SIZE_FOR_ALLOC);
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MT_AllocIterateTest<FREELIST_ALLOCATOR_MIN_SIZE, MAX_MT_ALLOC_SIZE, THREADS_COUNT>(
            MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, CrossingMapSingleton::GetCrossingMapGranularity());
        // true: also remove freed pools from the CrossingMap so the next run
        // starts from a clean state.
        ClearPoolManager(true);
    }
}
378
/**
 * Multithreaded test combining concurrent allocation with GC-style collection.
 * Runs MT_TEST_RUN_COUNT times; pools are returned to the PoolManager (and
 * unregistered from the CrossingMap) between runs.
 */
TEST_F(FreeListAllocatorTest, MTAllocCollectTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 1000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MAX_MT_ALLOC_SIZE = MAX_ALLOC_SIZE / 128;
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    // Threads can concurrently add Pools to the allocator, therefore, we must make it into account
    // And also we must take fragmentation into account
    ASSERT_TRUE(mem::MemConfig::GetHeapSizeLimit() >
                2 * (AlignUp(MAX_ELEMENTS_COUNT * MAX_MT_ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC)) +
                THREADS_COUNT * DEFAULT_POOL_SIZE_FOR_ALLOC);
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MT_AllocCollectTest<FREELIST_ALLOCATOR_MIN_SIZE, MAX_MT_ALLOC_SIZE, THREADS_COUNT>(MIN_ELEMENTS_COUNT,
                                                                                           MAX_ELEMENTS_COUNT);
        // true: also remove freed pools from the CrossingMap so the next run
        // starts from a clean state.
        ClearPoolManager(true);
    }
}
402
403 /*
404 * This test checks that `Free` clears padding status bits in MemoryBlockHeader.
405 * We allocate 4 consecutive blocks and free the 3rd and 1st ones. After it we
406 * allocate block that needs alignment that leads to padding size is saved after
407 * block header and PADDING_STATUS_COMMON_HEADER_WITH_PADDING_SIZE bit is set.
408 * We free this block and allocator overwrites padding size by pointer to the next
409 * free block. Allocator should clear padding status otherwise incorrect padding
410 * will be used and we got heap corruption.
411 */
/**
 * See the comment block above: verifies that Free() clears the padding-status
 * bits in MemoryBlockHeader so a later unaligned allocation does not interpret
 * stale free-list link bytes as a padding size (heap corruption).
 */
TEST_F(FreeListAllocatorTest, AlignTest)
{
    // Bug fix: the original wrote `std::unique_ptr<mem::MemStatsType>()`,
    // which default-constructs an EMPTY (null) unique_ptr, so the allocator
    // received a nullptr MemStatsType. make_unique creates a real instance.
    auto mem_stats = std::make_unique<mem::MemStatsType>();
    NonObjectFreeListAllocator allocator(mem_stats.get());
    AddMemoryPoolToAllocator(allocator);

    // Four consecutive blocks; freeing the 3rd and then the 1st leaves two holes.
    auto *ptr1 = allocator.Alloc(1678U);
    auto *ptr2 = allocator.Alloc(1678U);
    auto *ptr3 = allocator.Alloc(1678U);
    auto *ptr4 = allocator.Alloc(1678U);
    ASSERT_TRUE(ptr1 != nullptr);
    ASSERT_TRUE(ptr2 != nullptr);
    ASSERT_TRUE(ptr3 != nullptr);
    ASSERT_TRUE(ptr4 != nullptr);

    allocator.Free(ptr3);
    allocator.Free(ptr1);

    // The aligned allocation reuses the 3rd hole; the padding size is stored
    // after the block header and the padding-status bit is set.
    auto *ptr5 = allocator.Alloc(1536U, GetLogAlignment(128U));
    ASSERT_FALSE(IsAligned(reinterpret_cast<uintptr_t>(ptr3), 128U));
    ASSERT_TRUE(IsAligned(reinterpret_cast<uintptr_t>(ptr5), 128U));
    allocator.Free(ptr5);

    auto *ptr6 = allocator.Alloc(1272U);
    ASSERT_TRUE(ptr6 != nullptr);
    // allocations 5 and 6 should use the same memory block. But after free padding bits
    // should be cleared so pointers will be not equal.
    ASSERT_TRUE(ptr5 != ptr6);
    allocator.Free(ptr6);

    allocator.Free(ptr2);
    allocator.Free(ptr4);
}
440
441 } // namespace panda::mem
442