//===-- release_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "list.h"
#include "release.h"
#include "size_class_map.h"

#include <string.h>

#include <algorithm>
#include <memory>
#include <random>
#include <set>
#include <string>
#include <utility>
#include <vector>

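// Exercises RegionPageMap directly: counters with various maximum values
// should be packed into whole machine words, and inc()/incRange()/incN()
// should update them as expected.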
TEST(ScudoReleaseTest, RegionPageMap) {
  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
    // Various valid counters' max values packed into one word.
    scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
    ASSERT_TRUE(PageMap2N.isAllocated());
    EXPECT_EQ(sizeof(scudo::uptr), PageMap2N.getBufferSize());
    // Check the "all bits set" values too.
    scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
    ASSERT_TRUE(PageMap2N1_1.isAllocated());
    EXPECT_EQ(sizeof(scudo::uptr), PageMap2N1_1.getBufferSize());
    // Verify the packing ratio, the counter is expected to be packed into the
    // closest power of 2 bits.
    scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
    ASSERT_TRUE(PageMap.isAllocated());
    EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpPowerOfTwo(I + 1),
              PageMap.getBufferSize());
  }

  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::RegionPageMap PageMap(1U, NumCounters,
                                 1UL << ((1UL << I) - 1));
    ASSERT_TRUE(PageMap.isAllocated());
    PageMap.inc(0U, 0U);
    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.inc(0U, C);
      EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
    PageMap.inc(0U, NumCounters - 1);
    if (I > 0) {
      PageMap.incRange(0u, 0U, NumCounters - 1);
      for (scudo::uptr C = 0; C < NumCounters; C++)
        EXPECT_EQ(2UL, PageMap.get(0U, C));
    }
  }

  // Similar to the above except that we are using incN().
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::uptr MaxValue = 1UL << ((1UL << I) - 1);
    if (MaxValue <= 1U)
      continue;

    scudo::RegionPageMap PageMap(1U, NumCounters, MaxValue);

    scudo::uptr N = MaxValue / 2;
    PageMap.incN(0U, 0, N);
    for (scudo::uptr C = 1; C < NumCounters; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.incN(0U, C, N);
      EXPECT_EQ(N, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(N, PageMap.get(0U, NumCounters - 1));
  }
}

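// Recorder for FreePagesRangeTracker: encodes the reported page ranges as a
// string with one character per page ('x' for a released page, '.' for a
// skipped one), so the result can be compared against the test case string.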
class StringRangeRecorder {
public:
  std::string ReportedPages;

  StringRangeRecorder()
      : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    From >>= PageSizeScaledLog;
    To >>= PageSizeScaledLog;
    EXPECT_LT(From, To);
    if (!ReportedPages.empty())
      EXPECT_LT(LastPageReported, From);
    ReportedPages.append(From - LastPageReported, '.');
    ReportedPages.append(To - From, 'x');
    LastPageReported = To;
  }

private:
  const scudo::uptr PageSizeScaledLog;
  scudo::uptr LastPageReported = 0;
};

TEST(ScudoReleaseTest, FreePagesRangeTracker) {
  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char *TestCases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };
  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;

  for (auto TestCase : TestCases) {
    StringRangeRecorder Recorder;
    RangeTracker Tracker(Recorder);
    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
      Tracker.processNextPage(TestCase[I] == 'x');
    Tracker.finish();
    // Strip trailing '.'-pages before comparing the results as they are not
    // going to be reported to the recorder anyway.
    const char *LastX = strrchr(TestCase, 'x');
    std::string Expected(TestCase,
                         LastX == nullptr ? 0 : (LastX - TestCase + 1));
    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
  }
}

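// Recorder that collects the base address of every released page into a set,
// so tests can query whether a given page was released.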
class ReleasedPagesRecorder {
public:
  ReleasedPagesRecorder() = default;
  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
  std::set<scudo::uptr> ReportedPages;

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    for (scudo::uptr I = From; I < To; I += PageSize)
      ReportedPages.insert(I + getBase());
  }

  scudo::uptr getBase() const { return Base; }
  scudo::uptr Base = 0;
};

// Simplified version of a TransferBatch.
template <class SizeClassMap> struct FreeBatch {
  static const scudo::u16 MaxCount = SizeClassMap::MaxNumCachedHint;
  void clear() { Count = 0; }
  void add(scudo::uptr P) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = P;
  }
  scudo::u16 getCount() const { return Count; }
  scudo::uptr get(scudo::u16 I) const {
    DCHECK_LE(I, Count);
    return Batch[I];
  }
  FreeBatch *Next;

private:
  scudo::uptr Batch[MaxCount];
  scudo::u16 Count;
};

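// For each size class, builds a pseudo-random free list over a 1024-page
// region, runs the release logic and verifies that the released pages are
// exactly those fully covered by ranges of free blocks.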
template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PagesCount = 1024;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
  std::mt19937 R;
  scudo::u32 RandState = 42;

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr MaxBlocks = PagesCount * PageSize / BlockSize;

    // Generate the random free list.
    std::vector<scudo::uptr> FreeArray;
    bool InFreeRange = false;
    scudo::uptr CurrentRangeEnd = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      if (I == CurrentRangeEnd) {
        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
      }
      if (InFreeRange)
        FreeArray.push_back(I * BlockSize);
    }
    if (FreeArray.empty())
      continue;
    // Shuffle the array to ensure that the order is irrelevant.
    std::shuffle(FreeArray.begin(), FreeArray.end(), R);

    // Build the FreeList from the FreeArray.
    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();
    Batch *CurrentBatch = nullptr;
    for (auto const &Block : FreeArray) {
      if (!CurrentBatch) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
      if (CurrentBatch->getCount() == Batch::MaxCount)
        CurrentBatch = nullptr;
    }

    // Release the memory.
    auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
    auto DecompactPtr = [](scudo::uptr P) { return P; };
    ReleasedPagesRecorder Recorder;
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
    ASSERT_FALSE(Context.hasBlockMarked());
    Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
                                   /*RegionIndex=*/0, MaxBlocks * BlockSize,
                                   /*MayContainLastBlockInRegion=*/true);
    ASSERT_TRUE(Context.hasBlockMarked());
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    scudo::RegionPageMap &PageMap = Context.PageMap;

    // Verify that no released page is touched by a used chunk, and that all
    // ranges of free chunks big enough to contain entire memory pages had
    // those pages released.
    scudo::uptr VerifiedReleasedPages = 0;
    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());

    scudo::uptr CurrentBlock = 0;
    InFreeRange = false;
    scudo::uptr CurrentFreeRangeStart = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      const bool IsFreeBlock =
          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
      if (IsFreeBlock) {
        if (!InFreeRange) {
          InFreeRange = true;
          CurrentFreeRangeStart = CurrentBlock;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        const scudo::uptr StartPage = CurrentBlock / PageSize;
        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
                                    Recorder.ReportedPages.end();
          EXPECT_EQ(false, PageReleased);
          EXPECT_EQ(false,
                    PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
        }

        if (InFreeRange) {
          InFreeRange = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
          while (P + PageSize <= CurrentBlock) {
            const bool PageReleased =
                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
            EXPECT_EQ(true, PageReleased);
            EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
            VerifiedReleasedPages++;
            P += PageSize;
          }
        }
      }

      CurrentBlock += BlockSize;
    }

    if (InFreeRange) {
      scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
      const scudo::uptr EndPage =
          scudo::roundUp(MaxBlocks * BlockSize, PageSize);
      while (P + PageSize <= EndPage) {
        const bool PageReleased =
            Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
        EXPECT_EQ(true, PageReleased);
        EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
        VerifiedReleasedPages++;
        P += PageSize;
      }
    }

    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  }
}

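// Splits a region into two block groups and checks that markRangeAsAllCounted()
// marks exactly the pages covered by each group: pages fully covered become
// all-counted, while pages shared with a straddling block only have their
// counter incremented.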
template <class SizeClassMap> void testPageMapMarkRange() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);

    const scudo::uptr GroupNum = 2;
    const scudo::uptr GroupSize = scudo::roundUp(BlockSize, PageSize) * 2;
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(GroupSize * GroupNum, BlockSize);
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    for (scudo::uptr GroupId = 0; GroupId < GroupNum; ++GroupId) {
      const scudo::uptr GroupBeg = GroupId * GroupSize;
      const scudo::uptr GroupEnd = GroupBeg + GroupSize;

      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize);
      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);

      scudo::uptr FirstBlock =
          ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;

      // None of the pages before the first block's page is supposed to be
      // marked.
      if (FirstBlock / PageSize > 0) {
        for (scudo::uptr Page = 0; Page <= FirstBlock / PageSize - 1; ++Page)
          EXPECT_EQ(Context.PageMap.get(/*Region=*/0, Page), 0U);
      }

      // Verify the pages used by the blocks in the group, except that if the
      // end of the last block is not aligned with `GroupEnd`, that block is
      // verified later.
      scudo::uptr Block;
      for (Block = FirstBlock; Block + BlockSize <= GroupEnd;
           Block += BlockSize) {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          // The first used page in the group has two cases: with or without a
          // block sitting across the group boundary.
          if (Page == FirstBlock / PageSize) {
            if (FirstBlock % PageSize == 0) {
              EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0U, Page));
            } else {
              // There's a block straddling `GroupBeg`, which only increments
              // the counter, so the count is expected to be 1 less (excluding
              // the straddling block) than the total number of blocks on the
              // page.
              EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page),
                        Pages[Page] - 1);
            }
          } else {
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          }
        }
      }

      if (Block == GroupEnd)
        continue;

      // Examine the last block which sits across the group boundary.
      if (Block + BlockSize == RegionSize) {
        // This is the last block in the region; it's supposed to mark all the
        // pages as all-counted.
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
        }
      } else {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          if (Page <= (GroupEnd - 1) / PageSize)
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          else
            EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 1U);
        }
      }

      const scudo::uptr FirstUncountedPage =
          scudo::roundUp(Block + BlockSize, PageSize);
      for (scudo::uptr Page = FirstUncountedPage;
           Page <= RoundedRegionSize / PageSize; ++Page) {
        EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 0U);
      }
    } // Iterate each Group

    // Release the entire region. This is to ensure the last page is counted.
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/RegionSize);
    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0,
                                  /*RegionIndex=*/0, RegionSize);
    for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
      EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
  } // Iterate each size class
}

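// Marks and releases every page of a region except the first one, using both
// markFreeBlocksInRegion() and markRangeAsAllCounted(), and checks that the
// two marking paths agree with the expected per-page block counts.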
template <class SizeClassMap> void testReleasePartialRegion() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    // In the following, we want to ensure the region includes at least 2 pages
    // and we will release all the pages except the first one. The handling of
    // the last block is tricky, so we always test the case that includes the
    // last block.
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
                           BlockSize) +
        BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();

    // Skip the blocks in the first page and add the remaining.
    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    // This follows the logic of how we count the last page. It should be
    // consistent with how markFreeBlocksInRegion() handles the last block.
    if (RoundedRegionSize % BlockSize != 0)
      ++Pages.back();

    Batch *CurrentBatch = nullptr;
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block < RegionSize; Block += BlockSize) {
      if (CurrentBatch == nullptr ||
          CurrentBatch->getCount() == Batch::MaxCount) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
    }

    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
      ReleasedPagesRecorder Recorder(ReleaseBase);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
      const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);

      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
        if (P < FirstBlock) {
          // If FirstBlock is not aligned with a page boundary, the first
          // touched page will not be released either.
          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
                      Recorder.ReportedPages.end());
        } else {
          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
                      Recorder.ReportedPages.end());
        }
      }
    };

    // Test marking by visiting each block.
    {
      auto DecompactPtr = [](scudo::uptr P) { return P; };
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markFreeBlocksInRegion(FreeList, DecompactPtr, /*Base=*/0U,
                                     /*RegionIndex=*/0, RegionSize,
                                     /*MayContainLastBlockInRegion=*/true);
      for (const Batch &It : FreeList) {
        for (scudo::u16 I = 0; I < It.getCount(); I++) {
          scudo::uptr Block = It.get(I);
          for (scudo::uptr Page = Block / PageSize;
               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
                                                       Page - BasePageOffset));
          }
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Test range marking.
    {
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);
      for (scudo::uptr Page = ReleaseBase / PageSize;
           Page < RoundedRegionSize / PageSize; ++Page) {
        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
            Pages[Page]) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
                                                   Page - BasePageOffset));
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Check the buffer size of PageMap.
    {
      scudo::PageReleaseContext Full(BlockSize, /*NumberOfRegions=*/1U,
                                     /*ReleaseSize=*/RegionSize);
      Full.ensurePageMapAllocated();
      scudo::PageReleaseContext Partial(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Partial.ensurePageMapAllocated();

      EXPECT_GE(Full.PageMap.getBufferSize(), Partial.PageMap.getBufferSize());
    }

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  } // Iterate each size class
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSSvelte) {
  testReleaseFreeMemoryToOS<scudo::SvelteSizeClassMap>();
}

TEST(ScudoReleaseTest, PageMapMarkRange) {
  testPageMapMarkRange<scudo::DefaultSizeClassMap>();
  testPageMapMarkRange<scudo::AndroidSizeClassMap>();
  testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
  testPageMapMarkRange<scudo::SvelteSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleasePartialRegion) {
  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
  testReleasePartialRegion<scudo::SvelteSizeClassMap>();
}

template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  // We want to verify that a memory group which contains only a single block
  // is handled properly. The case looks like:
  //
  //             From                    To
  //               +---------------------+
  //  +------------+------------+
  //  |            |            |
  //  +------------+------------+
  //                            ^
  //                        RegionSize
  //
  // Note that `From` will be page aligned.
  //
  // If the second-to-last block is aligned at `From`, then we expect all the
  // pages after `From` to be marked as can-be-released. Otherwise, only the
  // pages touched by the last block will be marked as can-be-released.
  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr To =
        From % BlockSize == 0
            ? From + BlockSize
            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);

    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
         Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/To,
                                      /*ReleaseBase=*/0U);
    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
                                  /*RegionSize=*/To);

    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
          Pages[Page / PageSize]) {
        EXPECT_TRUE(
            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
      }
    }
  } // for each size class
}

TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::SvelteSizeClassMap>();
}

TEST(ScudoReleaseTest, BufferPool) {
  constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
  constexpr scudo::uptr StaticBufferSize = 512U;

  // Allocate the buffer pool on the heap because it is quite large (slightly
  // more than StaticBufferCount * StaticBufferSize * sizeof(uptr)) and it may
  // not fit on the stack on some platforms.
  using BufferPool = scudo::BufferPool<StaticBufferCount, StaticBufferSize>;
  std::unique_ptr<BufferPool> Pool(new BufferPool());

  std::vector<std::pair<scudo::uptr *, scudo::uptr>> Buffers;
  for (scudo::uptr I = 0; I < StaticBufferCount; ++I) {
    scudo::uptr *P = Pool->getBuffer(StaticBufferSize);
    EXPECT_TRUE(Pool->isStaticBufferTestOnly(P, StaticBufferSize));
    Buffers.emplace_back(P, StaticBufferSize);
  }

  // The static buffers are supposed to be used up.
  scudo::uptr *P = Pool->getBuffer(StaticBufferSize);
  EXPECT_FALSE(Pool->isStaticBufferTestOnly(P, StaticBufferSize));

  Pool->releaseBuffer(P, StaticBufferSize);
  for (auto &Buffer : Buffers)
    Pool->releaseBuffer(Buffer.first, Buffer.second);
}