/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <sys/mman.h>

#include "libpandabase/os/mem.h"
#include "libpandabase/utils/logger.h"
#include "runtime/mem/runslots_allocator-inl.h"
#include "runtime/mem/runslots_allocator_stl_adapter.h"
#include "runtime/tests/allocator_test_base.h"

namespace panda::mem {

using NonObjectAllocator = RunSlotsAllocator<EmptyAllocConfigWithCrossingMap>;
using RunSlotsType = RunSlots<>;

class RunSlotsAllocatorTest : public AllocatorTest<NonObjectAllocator> {
public:
    NO_COPY_SEMANTIC(RunSlotsAllocatorTest);
    NO_MOVE_SEMANTIC(RunSlotsAllocatorTest);

    // NOLINTNEXTLINE(modernize-use-equals-default)
    RunSlotsAllocatorTest()
    {
        // Logger::InitializeStdLogging(Logger::Level::DEBUG, Logger::Component::ALL);
    }

    ~RunSlotsAllocatorTest() override
    {
        for (auto i : allocatedMemMmap_) {
            panda::os::mem::UnmapRaw(std::get<0>(i), std::get<1>(i));
        }
        // Logger::Destroy();
    }

protected:
    static constexpr size_t DEFAULT_POOL_SIZE_FOR_ALLOC = NonObjectAllocator::GetMinPoolSize();
    static constexpr size_t DEFAULT_POOL_ALIGNMENT_FOR_ALLOC = RUNSLOTS_ALIGNMENT_IN_BYTES;
    static constexpr Alignment RUNSLOTS_LOG_MAX_ALIGN = LOG_ALIGN_8;

    void AddMemoryPoolToAllocator(NonObjectAllocator &alloc) override
    {
        os::memory::LockHolder lock(poolLock_);
        void *mem = panda::os::mem::MapRWAnonymousRaw(DEFAULT_POOL_SIZE_FOR_ALLOC);
        std::pair<void *, size_t> newPair {mem, DEFAULT_POOL_SIZE_FOR_ALLOC};
        allocatedMemMmap_.push_back(newPair);
        if (!alloc.AddMemoryPool(mem, DEFAULT_POOL_SIZE_FOR_ALLOC)) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

    void AddMemoryPoolToAllocatorProtected(NonObjectAllocator &alloc) override
    {
        os::memory::LockHolder lock(poolLock_);
        void *mem = panda::os::mem::MapRWAnonymousRaw(DEFAULT_POOL_SIZE_FOR_ALLOC + PAGE_SIZE);
        // Protect the extra page right after the pool to catch out-of-bounds accesses
        mprotect(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(mem) + DEFAULT_POOL_SIZE_FOR_ALLOC), PAGE_SIZE,
                 PROT_NONE);
        std::pair<void *, size_t> newPair {mem, DEFAULT_POOL_SIZE_FOR_ALLOC + PAGE_SIZE};
        allocatedMemMmap_.push_back(newPair);
        if (!alloc.AddMemoryPool(mem, DEFAULT_POOL_SIZE_FOR_ALLOC)) {
            ASSERT_TRUE(0 && "Can't add mem pool to allocator");
        }
    }

    void ReleasePages(NonObjectAllocator &alloc)
    {
        alloc.ReleaseEmptyRunSlotsPagesUnsafe();
    }

    bool AllocatedByThisAllocator(NonObjectAllocator &allocator, void *mem) override
    {
        return allocator.AllocatedByRunSlotsAllocator(mem);
    }

    void TestRunSlots(size_t slotsSize)
    {
        LOG(DEBUG, ALLOC) << "Test RunSlots with size " << slotsSize;
        void *mem = aligned_alloc(RUNSLOTS_ALIGNMENT_IN_BYTES, RUNSLOTS_SIZE);
        ASSERT_TRUE(mem != nullptr);
        auto runslots = reinterpret_cast<RunSlotsType *>(mem);
        runslots->Initialize(slotsSize, ToUintPtr(mem), true);
        // Count how many slots can be popped from a freshly initialized page
        int i = 0;
        while (runslots->PopFreeSlot() != nullptr) {
            i++;
        }
        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(mem);
        LOG(DEBUG, ALLOC) << "Iteration = " << i;
    }

private:
    std::vector<std::pair<void *, size_t>> allocatedMemMmap_;
    // Mutex which allows only one thread at a time to add a pool to the pool vector
    os::memory::Mutex poolLock_;
};

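// Initialize a standalone RunSlots page for every power-of-two slot size and pop free slots until the page is empty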
TEST_F(RunSlotsAllocatorTest, SimpleRunSlotsTest)
{
    for (size_t i = RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MinSlotSize());
         i <= RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MaxSlotSize()); i++) {
        TestRunSlots(1U << i);
    }
}

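// Allocate objects of a range of different sizes from a single pool and log the returned addresses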
TEST_F(RunSlotsAllocatorTest, SimpleAllocateDifferentObjSizeTest)
{
    LOG(DEBUG, ALLOC) << "SimpleAllocateDifferentObjSizeTest";
    mem::MemStatsType memStats;
    NonObjectAllocator allocator(&memStats);
    AddMemoryPoolToAllocator(allocator);
    // NOLINTNEXTLINE(readability-magic-numbers)
    for (size_t i = 23UL; i < 300UL; i++) {
        void *mem = allocator.Alloc(i);
        (void)mem;
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << i << " at " << std::hex << mem;
    }
}

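// Fill the whole pool with minimal-size objects, free all of them except the last one,
// release the now-empty RunSlots pages and check that the freed memory can be allocated again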
TEST_F(RunSlotsAllocatorTest, TestReleaseRunSlotsPagesTest)
{
    static constexpr size_t ALLOC_SIZE = RunSlotsType::ConvertToPowerOfTwoUnsafe(RunSlotsType::MinSlotSize());
    LOG(DEBUG, ALLOC) << "TestReleaseRunSlotsPagesTest";
    mem::MemStatsType memStats;
    NonObjectAllocator allocator(&memStats);
    AddMemoryPoolToAllocator(allocator);
    std::vector<void *> elements;
    // Fill the whole pool
    while (true) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        if (mem == nullptr) {
            break;
        }
        elements.push_back(mem);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << ALLOC_SIZE << " at " << std::hex << mem;
    }
    // Free everything except the last element
    ASSERT(elements.size() > 1);
    size_t elementToFreeCount = elements.size() - 1;
    for (size_t i = 0; i < elementToFreeCount; i++) {
        allocator.Free(elements.back());
        elements.pop_back();
    }

    // Release the empty RunSlots pages back to the allocator
    ReleasePages(allocator);

    // Try to allocate everything again
    for (size_t i = 0; i < elementToFreeCount; i++) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr);
        elements.push_back(mem);
        LOG(DEBUG, ALLOC) << "Allocate obj with size " << ALLOC_SIZE << " at " << std::hex << mem;
    }

    // Free everything
    for (auto i : elements) {
        allocator.Free(i);
    }
}

TEST_F(RunSlotsAllocatorTest, AllocateAllPossibleSizesFreeTest)
{
    for (size_t i = 1; i <= RunSlotsType::MaxSlotSize(); i++) {
        AllocateAndFree(i, RUNSLOTS_SIZE / i);
    }
}

TEST_F(RunSlotsAllocatorTest, AllocateWriteFreeTest)
{
    // NOLINTNEXTLINE(readability-magic-numbers)
    AllocateAndFree(sizeof(uint64_t), 512UL);
}

TEST_F(RunSlotsAllocatorTest, AllocateRandomFreeTest)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    static constexpr size_t ELEMENTS_COUNT = 512;
    static constexpr size_t POOLS_COUNT = 1;
    AllocateFreeDifferentSizesTest<ALLOC_SIZE / 2UL, 2UL * ALLOC_SIZE>(ELEMENTS_COUNT, POOLS_COUNT);
}

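// Check that freed slots are reused by subsequent allocations (AllocateReuseTest comes from the base test class)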
TEST_F(RunSlotsAllocatorTest, CheckReuseOfRunSlotsTest)
{
    AllocateReuseTest(RUNSLOTS_ALIGNMENT_MASK);
}

TEST_F(RunSlotsAllocatorTest, AllocateTooBigObjTest)
{
    AllocateTooBigObjectTest<RunSlotsType::MaxSlotSize()>();
}

TEST_F(RunSlotsAllocatorTest, AlignmentAllocTest)
{
    AlignedAllocFreeTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, AllocateTooMuchTest)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    AllocateTooMuchTest(ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC / ALLOC_SIZE);
}

TEST_F(RunSlotsAllocatorTest, AllocateVectorTest)
{
    AllocateVectorTest();
}

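// Regression test: three small allocations are written, verified and freed, and the reclaimed memory
// is then reused for three larger allocations whose contents are verified in turn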
TEST_F(RunSlotsAllocatorTest, AllocateReuse2)
{
    // This is a regression test
    auto *memStats = new mem::MemStatsType();
    NonObjectAllocator allocator(memStats);
    static constexpr size_t SIZE1 = 60;
    static constexpr size_t SIZE2 = 204;
    constexpr char CHAR1 = 'a';
    constexpr char CHAR2 = 'b';
    constexpr char CHAR3 = 'c';
    constexpr char CHAR4 = 'd';
    constexpr char CHAR5 = 'e';
    constexpr char CHAR6 = 'f';
    AddMemoryPoolToAllocatorProtected(allocator);
    char *strA;
    char *strB;
    char *strC;
    char *strD;
    char *strE;
    char *strF;
    auto fillStr = [](char *str, char c, size_t size) {
        for (size_t i = 0; i < size - 1; i++) {
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            str[i] = c;
        }
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        str[size - 1] = 0;
    };
    auto checkStr = [](const char *str, char c, size_t size) {
        for (size_t i = 0; i < size - 1; i++) {
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            if (str[i] != c) {
                return false;
            }
        }
        return true;
    };
    strA = reinterpret_cast<char *>(allocator.Alloc(SIZE1));
    strB = reinterpret_cast<char *>(allocator.Alloc(SIZE1));
    strC = reinterpret_cast<char *>(allocator.Alloc(SIZE1));
    fillStr(strA, CHAR1, SIZE1);
    fillStr(strB, CHAR2, SIZE1);
    fillStr(strC, CHAR3, SIZE1);
    ASSERT_TRUE(checkStr(strA, CHAR1, SIZE1));
    ASSERT_TRUE(checkStr(strB, CHAR2, SIZE1));
    ASSERT_TRUE(checkStr(strC, CHAR3, SIZE1));
    allocator.Free(static_cast<void *>(strA));
    allocator.Free(static_cast<void *>(strB));
    allocator.Free(static_cast<void *>(strC));
    strD = reinterpret_cast<char *>(allocator.Alloc(SIZE2));
    strE = reinterpret_cast<char *>(allocator.Alloc(SIZE2));
    strF = reinterpret_cast<char *>(allocator.Alloc(SIZE2));
    fillStr(strD, CHAR4, SIZE2);
    fillStr(strE, CHAR5, SIZE2);
    fillStr(strF, CHAR6, SIZE2);
    ASSERT_TRUE(checkStr(strD, CHAR4, SIZE2));
    ASSERT_TRUE(checkStr(strE, CHAR5, SIZE2));
    ASSERT_TRUE(checkStr(strF, CHAR6, SIZE2));
    delete memStats;
}

TEST_F(RunSlotsAllocatorTest, ObjectIteratorTest)
{
    ObjectIteratorTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, ObjectCollectionTest)
{
    ObjectCollectionTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>();
}

TEST_F(RunSlotsAllocatorTest, ObjectIteratorInRangeTest)
{
    ObjectIteratorInRangeTest<1, RunSlotsType::MaxSlotSize(), LOG_ALIGN_MIN, RUNSLOTS_LOG_MAX_ALIGN>(
        CrossingMapSingleton::GetCrossingMapGranularity());
}

TEST_F(RunSlotsAllocatorTest, AsanTest)
{
    AsanTest();
}

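// Allocate objects across several pools, free them and check that completely empty pools are reported
// and can be removed (VisitAndRemoveFreePools comes from the base test class)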
TEST_F(RunSlotsAllocatorTest, VisitAndRemoveFreePoolsTest)
{
    static constexpr size_t POOLS_COUNT = 5;
    VisitAndRemoveFreePools<POOLS_COUNT>(RunSlotsType::MaxSlotSize());
}

TEST_F(RunSlotsAllocatorTest, AllocatedByRunSlotsAllocatorTest)
{
    AllocatedByThisAllocatorTest();
}

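// Check that a RunSlots page released by a big object is reused for small objects (and vice versa)
// without corrupting the contents of live objects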
TEST_F(RunSlotsAllocatorTest, RunSlotsReusingTest)
{
    static constexpr size_t SMALL_OBJ_SIZE = sizeof(uint32_t);
    static constexpr size_t BIG_OBJ_SIZE = 128;
    auto *memStats = new mem::MemStatsType();
    NonObjectAllocator allocator(memStats);
    AddMemoryPoolToAllocatorProtected(allocator);
    // Alloc one big object. This must initialize a RunSlots page with this slot size
    void *mem = allocator.Alloc(BIG_OBJ_SIZE);
    // Free this object
    allocator.Free(mem);

    // Alloc a small object. The already allocated and freed RunSlots page must be reused
    void *smallObjMem = allocator.Alloc(SMALL_OBJ_SIZE);
    size_t smallObjIndex = SetBytesFromByteArray(smallObjMem, SMALL_OBJ_SIZE);

    // Alloc a big object again
    void *bigObjMem = allocator.Alloc(BIG_OBJ_SIZE);
    size_t bigObjIndex = SetBytesFromByteArray(bigObjMem, BIG_OBJ_SIZE);

    // Alloc one more small object
    void *secondSmallObjMem = allocator.Alloc(SMALL_OBJ_SIZE);
    size_t secondSmallObjIndex = SetBytesFromByteArray(secondSmallObjMem, SMALL_OBJ_SIZE);

    ASSERT_TRUE(CompareBytesWithByteArray(bigObjMem, BIG_OBJ_SIZE, bigObjIndex));
    ASSERT_TRUE(CompareBytesWithByteArray(smallObjMem, SMALL_OBJ_SIZE, smallObjIndex));
    ASSERT_TRUE(CompareBytesWithByteArray(secondSmallObjMem, SMALL_OBJ_SIZE, secondSmallObjIndex));
    delete memStats;
}

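// Multi-threaded allocation/free stress test; MtAllocFreeTest comes from the base test class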
TEST_F(RunSlotsAllocatorTest, MTAllocFreeTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MtAllocFreeTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT);
    }
}

TEST_F(RunSlotsAllocatorTest, MTAllocIterateTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MtAllocIterateTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(
            MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, CrossingMapSingleton::GetCrossingMapGranularity());
    }
}

TEST_F(RunSlotsAllocatorTest, MTAllocCollectTest)
{
    static constexpr size_t MIN_ELEMENTS_COUNT = 1500;
    static constexpr size_t MAX_ELEMENTS_COUNT = 3000;
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static constexpr size_t THREADS_COUNT = 1;
#else
    static constexpr size_t THREADS_COUNT = 10;
#endif
    static constexpr size_t MT_TEST_RUN_COUNT = 5;
    for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) {
        MtAllocCollectTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT);
    }
}

} // namespace panda::mem