/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>

#include "libpandabase/os/mem.h"
#include "libpandabase/utils/logger.h"
#include "runtime/mem/runslots_allocator-inl.h"
#include "runtime/mem/runslots_allocator_stl_adapter.h"
#include "runtime/tests/allocator_test_base.h"

namespace ark::mem {

using NonObjectAllocator = RunSlotsAllocator<EmptyAllocConfigWithCrossingMap>;
using RunSlotsType = RunSlots<>;

class RunSlotsAllocatorTest : public AllocatorTest<NonObjectAllocator> {
public:
    NO_COPY_SEMANTIC(RunSlotsAllocatorTest);
    NO_MOVE_SEMANTIC(RunSlotsAllocatorTest);

    // NOLINTNEXTLINE(modernize-use-equals-default)
    RunSlotsAllocatorTest()
    {
        // Logger::InitializeStdLogging(Logger::Level::DEBUG, Logger::Component::ALL);
    }

    ~RunSlotsAllocatorTest() override
    {
        for (auto i : allocatedMemMmap_) {
            ark::os::mem::UnmapRaw(std::get<0>(i), std::get<1>(i));
        }
        // Logger::Destroy();
    }

protected:
    static constexpr size_t DEFAULT_POOL_SIZE_FOR_ALLOC = NonObjectAllocator::GetMinPoolSize();
    static constexpr size_t DEFAULT_POOL_ALIGNMENT_FOR_ALLOC = RUNSLOTS_ALIGNMENT_IN_BYTES;
    static constexpr Alignment RUNSLOTS_LOG_MAX_ALIGN = LOG_ALIGN_8;

    void AddMemoryPoolToAllocator(NonObjectAllocator &alloc) override
    {
        os::memory::LockHolder lock(poolLock_);
        void *mem = ark::os::mem::MapRWAnonymousRaw(DEFAULT_POOL_SIZE_FOR_ALLOC);
        std::pair<void *, size_t> newPair {mem, DEFAULT_POOL_SIZE_FOR_ALLOC};
        allocatedMemMmap_.push_back(newPair);
        if (!alloc.AddMemoryPool(mem, DEFAULT_POOL_SIZE_FOR_ALLOC)) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

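    // The "protected" variant below maps one extra page past the pool and marks it PROT_NONE,
    // so any stray access beyond DEFAULT_POOL_SIZE_FOR_ALLOC faults immediately instead of
    // silently corrupting neighbouring memory; the allocator itself still receives only
    // DEFAULT_POOL_SIZE_FOR_ALLOC bytes.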
    void AddMemoryPoolToAllocatorProtected(NonObjectAllocator &alloc) override
    {
        os::memory::LockHolder lock(poolLock_);
        void *mem = ark::os::mem::MapRWAnonymousRaw(DEFAULT_POOL_SIZE_FOR_ALLOC + PAGE_SIZE);
        mprotect(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(mem) + DEFAULT_POOL_SIZE_FOR_ALLOC), PAGE_SIZE,
                 PROT_NONE);
        std::pair<void *, size_t> newPair {mem, DEFAULT_POOL_SIZE_FOR_ALLOC + PAGE_SIZE};
        allocatedMemMmap_.push_back(newPair);
        if (!alloc.AddMemoryPool(mem, DEFAULT_POOL_SIZE_FOR_ALLOC)) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

    void ReleasePages(NonObjectAllocator &alloc)
    {
        alloc.ReleaseEmptyRunSlotsPagesUnsafe();
    }

    bool AllocatedByThisAllocator(NonObjectAllocator &allocator, void *mem) override
    {
        return allocator.AllocatedByRunSlotsAllocator(mem);
    }

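    // Initializes a single RunSlots page for the given slot size and pops free slots until the
    // page is exhausted; the number of successful pops is the slot capacity of one RUNSLOTS_SIZE
    // page for that size (logged for debugging).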
    void TestRunSlots(size_t slotsSize)
    {
        LOG(DEBUG, ALLOC) << "Test RunSlots with size " << slotsSize;
        void *mem = aligned_alloc(RUNSLOTS_ALIGNMENT_IN_BYTES, RUNSLOTS_SIZE);
        auto runslots = reinterpret_cast<RunSlotsType *>(mem);
        runslots->Initialize(slotsSize, ToUintPtr(mem), true);
        int i = 0;
        while (runslots->PopFreeSlot() != nullptr) {
            i++;
        }
        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(mem);
        LOG(DEBUG, ALLOC) << "Iteration = " << i;
    }

private:
    std::vector<std::pair<void *, size_t>> allocatedMemMmap_;
    // Mutex that allows only one thread at a time to add a pool to the pool vector
    os::memory::Mutex poolLock_;
};

TEST_F(RunSlotsAllocatorTest, SimpleRunSlotsTest)
{
    for (size_t i = RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MinSlotSize());
         i <= RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MaxSlotSize()); i++) {
        TestRunSlots(1U << i);
    }
}

TEST_F(RunSlotsAllocatorTest, SimpleAllocateDifferentObjSizeTest)
{
    LOG(DEBUG, ALLOC) << "SimpleAllocateDifferentObjSizeTest";
    mem::MemStatsType memStats;
    NonObjectAllocator allocator(&memStats);
    AddMemoryPoolToAllocator(allocator);
    // NOLINTNEXTLINE(readability-magic-numbers)
    for (size_t i = 23UL; i < 300UL; i++) {
        void *mem = allocator.Alloc(i);
        (void)mem;
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << i << " at " << std::hex << mem;
    }
}

TEST_F(RunSlotsAllocatorTest, TestReleaseRunSlotsPagesTest)
{
    static constexpr size_t ALLOC_SIZE = RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MinSlotSize());
    LOG(DEBUG, ALLOC) << "TestReleaseRunSlotsPagesTest";
    mem::MemStatsType memStats;
    NonObjectAllocator allocator(&memStats);
    AddMemoryPoolToAllocator(allocator);
    std::vector<void *> elements;
    // Fill the whole pool
    while (true) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        if (mem == nullptr) {
            break;
        }
        elements.push_back(mem);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << ALLOC_SIZE << " at " << std::hex << mem;
    }
    // Free everything except the last element
    ASSERT(elements.size() > 1);
    size_t elementToFreeCount = elements.size() - 1;
    for (size_t i = 0; i < elementToFreeCount; i++) {
        allocator.Free(elements.back());
        elements.pop_back();
    }

    // Release the pages of now-empty RunSlots
    ReleasePages(allocator);

    // Try to allocate everything again
    for (size_t i = 0; i < elementToFreeCount; i++) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        elements.push_back(mem);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << ALLOC_SIZE << " at " << std::hex << mem;
    }

    // Free everything
    for (auto i : elements) {
        allocator.Free(i);
    }
}

TEST_F(RunSlotsAllocatorTest, AllocateAllPossibleSizesFreeTest)
{
    for (size_t i = 1; i <= RunSlotsType::MaxSlotSize(); i++) {
        AllocateAndFree(i, RUNSLOTS_SIZE / i);
    }
}

TEST_F(RunSlotsAllocatorTest, AllocateWriteFreeTest)
{
    // NOLINTNEXTLINE(readability-magic-numbers)
    AllocateAndFree(sizeof(uint64_t), 512UL);
}

TEST_F(RunSlotsAllocatorTest, AllocateRandomFreeTest)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    static constexpr size_t ELEMENTS_COUNT = 512;
    static constexpr size_t POOLS_COUNT = 1;
    AllocateFreeDifferentSizesTest<ALLOC_SIZE / 2UL, 2UL * ALLOC_SIZE>(ELEMENTS_COUNT, POOLS_COUNT);
}

TEST_F(RunSlotsAllocatorTest, CheckReuseOfRunSlotsTest)
{
    AllocateReuseTest(RUNSLOTS_ALIGNMENT_MASK);
}

TEST_F(RunSlotsAllocatorTest, AllocateTooBigObjTest)
{
    AllocateTooBigObjectTest<RunSlotsType::MaxSlotSize()>();
}

TEST_F(RunSlotsAllocatorTest, AlignmentAllocTest)
{
    AlignedAllocFreeTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, AllocateTooMuchTest)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    AllocateTooMuchTest(ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC / ALLOC_SIZE);
}

TEST_F(RunSlotsAllocatorTest, AllocateVectorTest)
{
    AllocateVectorTest();
}

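// The test below allocates, fills and frees three objects of one slot size, then allocates three
// larger objects from the same (guard-page protected) pool and checks their contents, verifying
// that freed RunSlots pages are correctly reinitialized for a different slot size and that the
// new objects do not overlap.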
TEST_F(RunSlotsAllocatorTest, AllocateReuse2)
{
    // This is a regression test
    auto *memStats = new mem::MemStatsType();
    NonObjectAllocator allocator(memStats);
    static constexpr size_t SIZE1 = 60;
    static constexpr size_t SIZE2 = 204;
    constexpr char CHAR1 = 'a';
    constexpr char CHAR2 = 'b';
    constexpr char CHAR3 = 'c';
    constexpr char CHAR4 = 'd';
    constexpr char CHAR5 = 'e';
    constexpr char CHAR6 = 'f';
    AddMemoryPoolToAllocatorProtected(allocator);
    auto fillStr = [](char *str, char c, size_t size) {
        for (size_t i = 0; i < size - 1; i++) {
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            str[i] = c;
        }
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        str[size - 1] = 0;
    };
    auto checkStr = [](const char *str, char c, size_t size) {
        for (size_t i = 0; i < size - 1; i++) {
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            if (str[i] != c) {
                return false;
            }
        }
        return true;
    };
    char *strA = reinterpret_cast<char *>(allocator.Alloc(SIZE1));
    char *strB = reinterpret_cast<char *>(allocator.Alloc(SIZE1));
    char *strC = reinterpret_cast<char *>(allocator.Alloc(SIZE1));
    fillStr(strA, CHAR1, SIZE1);
    fillStr(strB, CHAR2, SIZE1);
    fillStr(strC, CHAR3, SIZE1);
    ASSERT_TRUE(checkStr(strA, CHAR1, SIZE1));
    ASSERT_TRUE(checkStr(strB, CHAR2, SIZE1));
    ASSERT_TRUE(checkStr(strC, CHAR3, SIZE1));
    allocator.Free(static_cast<void *>(strA));
    allocator.Free(static_cast<void *>(strB));
    allocator.Free(static_cast<void *>(strC));
    char *strD = reinterpret_cast<char *>(allocator.Alloc(SIZE2));
    char *strE = reinterpret_cast<char *>(allocator.Alloc(SIZE2));
    char *strF = reinterpret_cast<char *>(allocator.Alloc(SIZE2));
    fillStr(strD, CHAR4, SIZE2);
    fillStr(strE, CHAR5, SIZE2);
    fillStr(strF, CHAR6, SIZE2);
    ASSERT_TRUE(checkStr(strD, CHAR4, SIZE2));
    ASSERT_TRUE(checkStr(strE, CHAR5, SIZE2));
    ASSERT_TRUE(checkStr(strF, CHAR6, SIZE2));
    delete memStats;
}

TEST_F(RunSlotsAllocatorTest, ObjectIteratorTest)
{
    ObjectIteratorTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, ObjectCollectionTest)
{
    ObjectCollectionTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, ObjectIteratorInRangeTest)
{
    ObjectIteratorInRangeTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>(
        CrossingMapSingleton::GetCrossingMapGranularity());
}

TEST_F(RunSlotsAllocatorTest, AsanTest)
{
    AsanTest();
}

TEST_F(RunSlotsAllocatorTest, VisitAndRemoveFreePoolsTest)
{
    static constexpr size_t POOLS_COUNT = 5;
    VisitAndRemoveFreePools<POOLS_COUNT>(RunSlotsType::MaxSlotSize());
}

TEST_F(RunSlotsAllocatorTest, AllocatedByRunSlotsAllocatorTest)
{
    AllocatedByThisAllocatorTest();
}

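// Checks that a RunSlots page which served one slot size and then became empty is reinitialized
// and reused for allocations of a different slot size, with object contents staying intact.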
TEST_F(RunSlotsAllocatorTest, RunSlotsReusingTest)
{
    static constexpr size_t SMALL_OBJ_SIZE = sizeof(uint32_t);
    static constexpr size_t BIG_OBJ_SIZE = 128;
    auto *memStats = new mem::MemStatsType();
    NonObjectAllocator allocator(memStats);
    AddMemoryPoolToAllocatorProtected(allocator);
    // Alloc one big object. This must cause a RunSlots initialization with its size
    void *mem = allocator.Alloc(BIG_OBJ_SIZE);
    // Free this object
    allocator.Free(mem);

    // Alloc a small object. We must reuse the already allocated and freed RunSlots
    void *smallObjMem = allocator.Alloc(SMALL_OBJ_SIZE);
    size_t smallObjIndex = SetBytesFromByteArray(smallObjMem, SMALL_OBJ_SIZE);

    // Alloc a big object again
    void *bigObjMem = allocator.Alloc(BIG_OBJ_SIZE);
    size_t bigObjIndex = SetBytesFromByteArray(bigObjMem, BIG_OBJ_SIZE);

    // Alloc one more small object
    void *secondSmallObjMem = allocator.Alloc(SMALL_OBJ_SIZE);
    size_t secondSmallObjIndex = SetBytesFromByteArray(secondSmallObjMem, SMALL_OBJ_SIZE);

    ASSERT_TRUE(CompareBytesWithByteArray(bigObjMem, BIG_OBJ_SIZE, bigObjIndex));
    ASSERT_TRUE(CompareBytesWithByteArray(smallObjMem, SMALL_OBJ_SIZE, smallObjIndex));
    ASSERT_TRUE(CompareBytesWithByteArray(secondSmallObjMem, SMALL_OBJ_SIZE, secondSmallObjIndex));
    delete memStats;
}

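// The multi-threaded tests below repeat each scenario MT_TEST_RUN_COUNT times with THREADS_COUNT
// threads concurrently allocating (and freeing / iterating / collecting) between
// MIN_ELEMENTS_COUNT and MAX_ELEMENTS_COUNT objects; on ARM64 and 32-bit targets the thread count
// is reduced to 1 because of a known QEMU issue (Issue 2852).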
TEST_F(RunSlotsAllocatorTest, MTAllocFreeTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MtAllocFreeTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT);
    }
}

TEST_F(RunSlotsAllocatorTest, MTAllocIterateTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MtAllocIterateTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(
            MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, CrossingMapSingleton::GetCrossingMapGranularity());
    }
}

TEST_F(RunSlotsAllocatorTest, MTAllocCollectTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MtAllocCollectTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT);
    }
}

} // namespace ark::mem