/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>

#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

#include "libpandabase/os/mem.h"
#include "libpandabase/utils/logger.h"
#include "runtime/mem/runslots_allocator-inl.h"
#include "runtime/mem/runslots_allocator_stl_adapter.h"
#include "runtime/tests/allocator_test_base.h"

namespace panda::mem {

using NonObjectAllocator = RunSlotsAllocator<EmptyAllocConfigWithCrossingMap>;
using RunSlotsType = RunSlots<>;

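// Test fixture for RunSlotsAllocator: supplies raw mmap-backed memory pools to the allocator,
// optionally with a PROT_NONE guard page after the pool, and unmaps everything when a test finishes.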
class RunSlotsAllocatorTest : public AllocatorTest<NonObjectAllocator> {
public:
    RunSlotsAllocatorTest()
    {
        // Logger::InitializeStdLogging(Logger::Level::DEBUG, Logger::Component::ALL);
    }

    ~RunSlotsAllocatorTest()
    {
        // Unmap every pool that was handed out during the test
        for (auto i : allocated_mem_mmap_) {
            panda::os::mem::UnmapRaw(std::get<0>(i), std::get<1>(i));
        }
        // Logger::Destroy();
    }

protected:
    static constexpr size_t DEFAULT_POOL_SIZE_FOR_ALLOC = NonObjectAllocator::GetMinPoolSize();
    static constexpr size_t DEFAULT_POOL_ALIGNMENT_FOR_ALLOC = RUNSLOTS_ALIGNMENT_IN_BYTES;
    static constexpr Alignment RUNSLOTS_LOG_MAX_ALIGN = LOG_ALIGN_8;

    void AddMemoryPoolToAllocator(NonObjectAllocator &alloc) override
    {
        os::memory::LockHolder lock(pool_lock_);
        void *mem = panda::os::mem::MapRWAnonymousRaw(DEFAULT_POOL_SIZE_FOR_ALLOC);
        ASSERT_TRUE(mem != nullptr);
        std::pair<void *, size_t> new_pair {mem, DEFAULT_POOL_SIZE_FOR_ALLOC};
        allocated_mem_mmap_.push_back(new_pair);
        if (!alloc.AddMemoryPool(mem, DEFAULT_POOL_SIZE_FOR_ALLOC)) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

    void AddMemoryPoolToAllocatorProtected(NonObjectAllocator &alloc) override
    {
        os::memory::LockHolder lock(pool_lock_);
        void *mem = panda::os::mem::MapRWAnonymousRaw(DEFAULT_POOL_SIZE_FOR_ALLOC + PAGE_SIZE);
        ASSERT_TRUE(mem != nullptr);
        // Make the page right after the usable pool inaccessible so out-of-bounds accesses fault immediately
        mprotect(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(mem) + DEFAULT_POOL_SIZE_FOR_ALLOC), PAGE_SIZE,
                 PROT_NONE);
        std::pair<void *, size_t> new_pair {mem, DEFAULT_POOL_SIZE_FOR_ALLOC + PAGE_SIZE};
        allocated_mem_mmap_.push_back(new_pair);
        if (!alloc.AddMemoryPool(mem, DEFAULT_POOL_SIZE_FOR_ALLOC)) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

    void ReleasePages(NonObjectAllocator &alloc)
    {
        alloc.ReleaseEmptyRunSlotsPagesUnsafe();
    }

    bool AllocatedByThisAllocator(NonObjectAllocator &allocator, void *mem) override
    {
        return allocator.AllocatedByRunSlotsAllocator(mem);
    }

    void TestRunSlots(size_t slots_size)
    {
        LOG(DEBUG, ALLOC) << "Test RunSlots with size " << slots_size;
        void *mem = aligned_alloc(RUNSLOTS_ALIGNMENT_IN_BYTES, RUNSLOTS_SIZE);
        auto runslots = reinterpret_cast<RunSlotsType *>(mem);
        runslots->Initialize(slots_size, ToUintPtr(mem), true);
        // Pop free slots until the RunSlots page is exhausted and count how many it provided
        int i = 0;
        while (runslots->PopFreeSlot()) {
            i++;
        }
        free(mem);
        LOG(DEBUG, ALLOC) << "Iteration = " << i;
    }

    std::vector<std::pair<void *, size_t>> allocated_mem_mmap_;
    // Mutex which allows only one thread at a time to add a pool to the pool vector
    os::memory::Mutex pool_lock_;
};

TEST_F(RunSlotsAllocatorTest, SimpleRunSlotsTest)
{
    // Exercise a single RunSlots page for every supported power-of-two slot size
    for (size_t i = RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MinSlotSize());
         i <= RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MaxSlotSize()); i++) {
        TestRunSlots(1U << i);
    }
}

TEST_F(RunSlotsAllocatorTest, SimpleAllocateDifferentObjSizeTest)
{
    LOG(DEBUG, ALLOC) << "SimpleAllocateDifferentObjSizeTest";
    mem::MemStatsType mem_stats;
    NonObjectAllocator allocator(&mem_stats);
    AddMemoryPoolToAllocator(allocator);
    for (size_t i = 23; i < 300; i++) {
        void *mem = allocator.Alloc(i);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << i << " at " << std::hex << mem;
    }
}

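// Fill a pool completely, free all but one object, release the now-empty RunSlots pages,
// and check that the freed memory can be allocated again.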
TEST_F(RunSlotsAllocatorTest, TestReleaseRunSlotsPagesTest)
{
    static constexpr size_t ALLOC_SIZE = RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MinSlotSize());
    LOG(DEBUG, ALLOC) << "TestReleaseRunSlotsPagesTest";
    mem::MemStatsType mem_stats;
    NonObjectAllocator allocator(&mem_stats);
    AddMemoryPoolToAllocator(allocator);
    std::vector<void *> elements;
    // Fill the whole pool
    while (true) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        if (mem == nullptr) {
            break;
        }
        elements.push_back(mem);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << ALLOC_SIZE << " at " << std::hex << mem;
    }
    // Free everything except the last element
    ASSERT(elements.size() > 1);
    size_t element_to_free_count = elements.size() - 1;
    for (size_t i = 0; i < element_to_free_count; i++) {
        allocator.Free(elements.back());
        elements.pop_back();
    }

    // Release the now-empty RunSlots pages
    ReleasePages(allocator);

    // Try to allocate everything again
    for (size_t i = 0; i < element_to_free_count; i++) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        elements.push_back(mem);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << ALLOC_SIZE << " at " << std::hex << mem;
    }

    // Free everything
    for (auto i : elements) {
        allocator.Free(i);
    }
}

TEST_F(RunSlotsAllocatorTest, AllocateAllPossibleSizesFreeTest)
{
    for (size_t i = 1; i <= RunSlotsType::MaxSlotSize(); i++) {
        AllocateAndFree(i, RUNSLOTS_SIZE / i);
    }
}

TEST_F(RunSlotsAllocatorTest, AllocateWriteFreeTest)
{
    AllocateAndFree(sizeof(uint64_t), 512);
}

TEST_F(RunSlotsAllocatorTest, AllocateRandomFreeTest)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    static constexpr size_t ELEMENTS_COUNT = 512;
    static constexpr size_t POOLS_COUNT = 1;
    AllocateFreeDifferentSizesTest<ALLOC_SIZE / 2, 2 * ALLOC_SIZE>(ELEMENTS_COUNT, POOLS_COUNT);
}

TEST_F(RunSlotsAllocatorTest, CheckReuseOfRunSlotsTest)
{
    AllocateReuseTest(RUNSLOTS_ALIGNMENT_MASK);
}

TEST_F(RunSlotsAllocatorTest, AllocateTooBigObjTest)
{
    AllocateTooBigObjectTest<RunSlotsType::MaxSlotSize()>();
}

TEST_F(RunSlotsAllocatorTest, AlignmentAllocTest)
{
    AlignedAllocFreeTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, AllocateTooMuchTest)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    AllocateTooMuchTest(ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC / ALLOC_SIZE);
}

TEST_F(RunSlotsAllocatorTest, AllocateVectorTest)
{
    AllocateVectorTest();
}

TEST_F(RunSlotsAllocatorTest, AllocateReuse2)
{
    // Regression test: slots freed for one size class must be safely reusable for allocations
    // of a larger size class without corrupting the stored data
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    NonObjectAllocator allocator(mem_stats);
    static constexpr size_t size1 = 60;
    static constexpr size_t size2 = 204;
    constexpr char char1 = 'a';
    constexpr char char2 = 'b';
    constexpr char char3 = 'c';
    constexpr char char4 = 'd';
    constexpr char char5 = 'e';
    constexpr char char6 = 'f';
    AddMemoryPoolToAllocatorProtected(allocator);
    auto fillStr = [](char *str, char c, size_t size) {
        for (size_t i = 0; i < size - 1; i++) {
            str[i] = c;
        }
        str[size - 1] = 0;
    };
    auto checkStr = [](char *str, char c, size_t size) {
        for (size_t i = 0; i < size - 1; i++) {
            if (str[i] != c) {
                return false;
            }
        }
        return true;
    };
    char *strA = reinterpret_cast<char *>(allocator.Alloc(size1));
    char *strB = reinterpret_cast<char *>(allocator.Alloc(size1));
    char *strC = reinterpret_cast<char *>(allocator.Alloc(size1));
    fillStr(strA, char1, size1);
    fillStr(strB, char2, size1);
    fillStr(strC, char3, size1);
    ASSERT_TRUE(checkStr(strA, char1, size1));
    ASSERT_TRUE(checkStr(strB, char2, size1));
    ASSERT_TRUE(checkStr(strC, char3, size1));
    allocator.Free(static_cast<void *>(strA));
    allocator.Free(static_cast<void *>(strB));
    allocator.Free(static_cast<void *>(strC));
    char *strD = reinterpret_cast<char *>(allocator.Alloc(size2));
    char *strE = reinterpret_cast<char *>(allocator.Alloc(size2));
    char *strF = reinterpret_cast<char *>(allocator.Alloc(size2));
    fillStr(strD, char4, size2);
    fillStr(strE, char5, size2);
    fillStr(strF, char6, size2);
    ASSERT_TRUE(checkStr(strD, char4, size2));
    ASSERT_TRUE(checkStr(strE, char5, size2));
    ASSERT_TRUE(checkStr(strF, char6, size2));
    delete mem_stats;
}

TEST_F(RunSlotsAllocatorTest, ObjectIteratorTest)
{
    ObjectIteratorTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, ObjectCollectionTest)
{
    ObjectCollectionTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, ObjectIteratorInRangeTest)
{
    ObjectIteratorInRangeTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>(
        CrossingMapSingleton::GetCrossingMapGranularity());
}

TEST_F(RunSlotsAllocatorTest, AsanTest)
{
    AsanTest();
}

TEST_F(RunSlotsAllocatorTest, VisitAndRemoveFreePoolsTest)
{
    static constexpr size_t POOLS_COUNT = 5;
    VisitAndRemoveFreePools<POOLS_COUNT>(RunSlotsType::MaxSlotSize());
}

TEST_F(RunSlotsAllocatorTest, AllocatedByRunSlotsAllocatorTest)
{
    AllocatedByThisAllocatorTest();
}

TEST_F(RunSlotsAllocatorTest, RunSlotsReusingTest)
{
    static constexpr size_t SMALL_OBJ_SIZE = sizeof(uint32_t);
    static constexpr size_t BIG_OBJ_SIZE = 128;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    NonObjectAllocator allocator(mem_stats);
    AddMemoryPoolToAllocatorProtected(allocator);
    // Allocate one big object. This must initialize a RunSlots page with its size
    void *mem = allocator.Alloc(BIG_OBJ_SIZE);
    // Free this object
    allocator.Free(mem);

    // Allocate a small object. The previously allocated and freed RunSlots page must be reused
    void *small_obj_mem = allocator.Alloc(SMALL_OBJ_SIZE);
    size_t small_obj_index = SetBytesFromByteArray(small_obj_mem, SMALL_OBJ_SIZE);

    // Allocate a big object again
    void *big_obj_mem = allocator.Alloc(BIG_OBJ_SIZE);
    size_t big_obj_index = SetBytesFromByteArray(big_obj_mem, BIG_OBJ_SIZE);

    // Allocate one more small object
    void *second_small_obj_mem = allocator.Alloc(SMALL_OBJ_SIZE);
    size_t second_small_obj_index = SetBytesFromByteArray(second_small_obj_mem, SMALL_OBJ_SIZE);

    ASSERT_TRUE(CompareBytesWithByteArray(big_obj_mem, BIG_OBJ_SIZE, big_obj_index));
    ASSERT_TRUE(CompareBytesWithByteArray(small_obj_mem, SMALL_OBJ_SIZE, small_obj_index));
    ASSERT_TRUE(CompareBytesWithByteArray(second_small_obj_mem, SMALL_OBJ_SIZE, second_small_obj_index));
    delete mem_stats;
}

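// The multithreaded tests below run allocation, iteration and collection from several threads
// concurrently; the thread count is reduced to 1 on targets where QEMU-based MT runs are known
// to be unstable (see Issue 2852).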
TEST_F(RunSlotsAllocatorTest, MTAllocFreeTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MT_AllocFreeTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT);
    }
}

TEST_F(RunSlotsAllocatorTest, MTAllocIterateTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MT_AllocIterateTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(
            MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, CrossingMapSingleton::GetCrossingMapGranularity());
    }
}

TEST_F(RunSlotsAllocatorTest, MTAllocCollectTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MT_AllocCollectTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT);
    }
}

}  // namespace panda::mem