//===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "allocator_config_wrapper.h"
#include "secondary.h"

#include <string.h>

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include <vector>

// Get this once to use throughout the tests.
const scudo::uptr PageSize = scudo::getPageSizeCached();

template <typename Config> static scudo::Options getOptionsForConfig() {
  if (!Config::getMaySupportMemoryTagging() ||
      !scudo::archSupportsMemoryTagging() ||
      !scudo::systemSupportsMemoryTagging())
    return {};
  scudo::AtomicOptions AO;
  AO.set(scudo::OptionBit::UseMemoryTagging);
  return AO.load();
}

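// RAII helper that owns a MapAllocator for the given config: it registers the
// allocator with GlobalStats, prints the stats if the test fails, and unmaps
// everything on destruction.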
template <class Config> struct AllocatorInfoType {
  std::unique_ptr<scudo::MapAllocator<scudo::SecondaryConfig<Config>>>
      Allocator;
  scudo::GlobalStats GlobalStats;
  scudo::Options Options;

  AllocatorInfoType(scudo::s32 ReleaseToOsInterval) {
    using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
    Options = getOptionsForConfig<scudo::SecondaryConfig<Config>>();
    GlobalStats.init();
    Allocator.reset(new SecondaryT);
    Allocator->init(&GlobalStats, ReleaseToOsInterval);
  }

  AllocatorInfoType() : AllocatorInfoType(-1) {}

  ~AllocatorInfoType() {
    if (Allocator == nullptr) {
      return;
    }

    if (TEST_HAS_FAILURE) {
      // Print all of the stats if the test fails.
      scudo::ScopedString Str;
      Allocator->getStats(&Str);
      Str.output();
    }

    Allocator->unmapTestOnly();
  }
};

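// Test configs covering the four combinations of cache/no-cache and guard
// pages/no guard pages. The TSD registry and primary allocator are stubbed
// out with void since only the secondary is exercised here.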
struct TestNoCacheConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
};

struct TestNoCacheNoGuardPageConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
    static const bool EnableGuardPages = false;
  };
};

struct TestCacheConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Secondary {
    struct Cache {
      static const scudo::u32 EntriesArraySize = 128U;
      static const scudo::u32 QuarantineSize = 0U;
      static const scudo::u32 DefaultMaxEntriesCount = 64U;
      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    };

    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
  };
};

struct TestCacheNoGuardPageConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Secondary {
    struct Cache {
      static const scudo::u32 EntriesArraySize = 128U;
      static const scudo::u32 QuarantineSize = 0U;
      static const scudo::u32 DefaultMaxEntriesCount = 64U;
      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    };

    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
    static const bool EnableGuardPages = false;
  };
};

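// Exercises the basic allocate/deallocate path, aligned allocations, and, for
// sizes the secondary cannot cache, verifies that deallocated memory really
// becomes inaccessible (accessing it must crash).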
template <typename Config> static void testBasic() {
  using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
  AllocatorInfoType<Config> Info;

  const scudo::uptr Size = 1U << 16;
  void *P = Info.Allocator->allocate(Info.Options, Size);
  EXPECT_NE(P, nullptr);
  memset(P, 'A', Size);
  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
  Info.Allocator->deallocate(Info.Options, P);

  // If the Secondary can't cache that pointer, it will be unmapped.
  if (!Info.Allocator->canCache(Size)) {
    EXPECT_DEATH(
        {
          // Repeat a few times so we don't miss the crash in case the range
          // gets mmapped again by unrelated code.
          for (int i = 0; i < 10; ++i) {
            P = Info.Allocator->allocate(Info.Options, Size);
            Info.Allocator->deallocate(Info.Options, P);
            memset(P, 'A', Size);
          }
        },
        "");
  }

  const scudo::uptr Align = 1U << 16;
  P = Info.Allocator->allocate(Info.Options, Size + Align, Align);
  EXPECT_NE(P, nullptr);
  void *AlignedP = reinterpret_cast<void *>(
      scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
  memset(AlignedP, 'A', Size);
  Info.Allocator->deallocate(Info.Options, P);

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(Info.Allocator->allocate(Info.Options, Size));
  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
  while (!V.empty()) {
    Info.Allocator->deallocate(Info.Options, V.back());
    V.pop_back();
  }
}

TEST(ScudoSecondaryTest, Basic) {
  testBasic<TestNoCacheConfig>();
  testBasic<TestNoCacheNoGuardPageConfig>();
  testBasic<TestCacheConfig>();
  testBasic<TestCacheNoGuardPageConfig>();
  testBasic<scudo::DefaultConfig>();
}

// This exercises a variety of combinations of size and alignment for the
// MapAllocator. The size computations done here mimic the ones done by the
// combined allocator.
template <typename Config> void testAllocatorCombinations() {
  AllocatorInfoType<Config> Info;

  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
  constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
         AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
        if ((1LL << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr UserSize = scudo::roundUp(
            static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
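        // As in the combined allocator, reserve room for the chunk header in
        // front of the user data, plus extra slack when Align > MinAlign so
        // the header still fits below the aligned user address.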
        const scudo::uptr Size =
            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
        void *P = Info.Allocator->allocate(Info.Options, Size, Align);
        EXPECT_NE(P, nullptr);
        void *AlignedP = reinterpret_cast<void *>(
            scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
        memset(AlignedP, 0xff, UserSize);
        Info.Allocator->deallocate(Info.Options, P);
      }
    }
  }
}

TEST(ScudoSecondaryTest, AllocatorCombinations) {
  testAllocatorCombinations<TestNoCacheConfig>();
  testAllocatorCombinations<TestNoCacheNoGuardPageConfig>();
}

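// Verifies that iterateOverBlocks() visits every live secondary allocation;
// the allocator is disabled for the duration of the iteration.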
template <typename Config> void testAllocatorIterate() {
  AllocatorInfoType<Config> Info;

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(Info.Allocator->allocate(
        Info.Options,
        (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
  auto Lambda = [&V](scudo::uptr Block) {
    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
              V.end());
  };
  Info.Allocator->disable();
  Info.Allocator->iterateOverBlocks(Lambda);
  Info.Allocator->enable();
  while (!V.empty()) {
    Info.Allocator->deallocate(Info.Options, V.back());
    V.pop_back();
  }
}

TEST(ScudoSecondaryTest, AllocatorIterate) {
  testAllocatorIterate<TestNoCacheConfig>();
  testAllocatorIterate<TestNoCacheNoGuardPageConfig>();
}

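// Spawns 16 threads that allocate and deallocate concurrently while the
// allocator releases memory back to the OS immediately (interval 0), racing
// releases against ongoing map/unmap activity.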
template <typename Config> void testAllocatorWithReleaseThreadsRace() {
  AllocatorInfoType<Config> Info(/*ReleaseToOsInterval=*/0);

  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;

  std::thread Threads[16];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&Mutex, &Cv, &Ready, &Info]() {
      std::vector<void *> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 128U; I++) {
        // Deallocate 75% of the blocks.
        const bool Deallocate = (std::rand() & 3) != 0;
        void *P = Info.Allocator->allocate(
            Info.Options,
            (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
        if (Deallocate)
          Info.Allocator->deallocate(Info.Options, P);
        else
          V.push_back(P);
      }
      while (!V.empty()) {
        Info.Allocator->deallocate(Info.Options, V.back());
        V.pop_back();
      }
    });

  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
  testAllocatorWithReleaseThreadsRace<TestNoCacheConfig>();
  testAllocatorWithReleaseThreadsRace<TestNoCacheNoGuardPageConfig>();
}

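// Allocates Size bytes and reports, via the out-parameters, how many bytes of
// new mappings the allocation created and the size of the allocator's guard
// pages.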
template <typename Config>
void testGetMappedSize(scudo::uptr Size, scudo::uptr *mapped,
                       scudo::uptr *guard_page_size) {
  AllocatorInfoType<Config> Info;

  scudo::uptr Stats[scudo::StatCount] = {};
  Info.GlobalStats.get(Stats);
  *mapped = Stats[scudo::StatMapped];
  Stats[scudo::StatMapped] = 0;

  // Make sure the allocation is aligned to a page boundary so that the checks
  // in the tests can avoid problems due to allocations having different
  // alignments.
  void *Ptr = Info.Allocator->allocate(Info.Options, Size, PageSize);
  EXPECT_NE(Ptr, nullptr);

  Info.GlobalStats.get(Stats);
  EXPECT_GE(Stats[scudo::StatMapped], *mapped);
  *mapped = Stats[scudo::StatMapped] - *mapped;

  Info.Allocator->deallocate(Info.Options, Ptr);

  *guard_page_size = Info.Allocator->getGuardPageSize();
}

TEST(ScudoSecondaryTest, VerifyGuardPageOption) {
  static scudo::uptr AllocSize = 1000 * PageSize;

  // Verify that a config with guard pages enabled has:
  //  - A non-zero guard page size
  //  - At least the size of the allocation plus 2 * guard page size mapped in
  scudo::uptr guard_mapped = 0;
  scudo::uptr guard_page_size = 0;
  testGetMappedSize<TestNoCacheConfig>(AllocSize, &guard_mapped,
                                       &guard_page_size);
  EXPECT_GT(guard_page_size, 0U);
  EXPECT_GE(guard_mapped, AllocSize + 2 * guard_page_size);

  // Verify that a config with guard pages disabled has:
  //  - A zero guard page size
  //  - At least the allocation size mapped in
  scudo::uptr no_guard_mapped = 0;
  scudo::uptr no_guard_page_size = 0;
  testGetMappedSize<TestNoCacheNoGuardPageConfig>(AllocSize, &no_guard_mapped,
                                                  &no_guard_page_size);
  EXPECT_EQ(no_guard_page_size, 0U);
  EXPECT_GE(no_guard_mapped, AllocSize);

  // Verify that the guard page config mapped in at least two guard pages more
  // than the no guard page config.
  EXPECT_GE(guard_mapped, no_guard_mapped + guard_page_size * 2);
}

// Value written to cache entries that are unmapped.
static scudo::u32 UnmappedMarker = 0xDEADBEEF;

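// Test fixture wrapping a MapAllocatorCache. Its unmap callback writes
// UnmappedMarker into each released entry instead of unmapping it, so tests
// can detect evictions; the real unmapping happens in the destructor.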
template <class Config> struct CacheInfoType {
  static void addMarkerToMapCallback(scudo::MemMapT &MemMap) {
    // When a cache entry is unmapped, don't actually unmap it; instead, write
    // a special marker to indicate the cache entry was released. The real
    // unmap will happen in the destructor. It is assumed that all of these
    // maps will be in the MemMaps vector.
    scudo::u32 *Ptr = reinterpret_cast<scudo::u32 *>(MemMap.getBase());
    *Ptr = UnmappedMarker;
  }

  using SecondaryConfig = scudo::SecondaryConfig<TestCacheConfig>;
  using CacheConfig = SecondaryConfig::CacheConfig;
  using CacheT = scudo::MapAllocatorCache<CacheConfig, addMarkerToMapCallback>;
  scudo::Options Options = getOptionsForConfig<SecondaryConfig>();
  std::unique_ptr<CacheT> Cache = std::make_unique<CacheT>();
  std::vector<scudo::MemMapT> MemMaps;
  // The current test allocation size is set to the maximum cache entry size.
  static constexpr scudo::uptr TestAllocSize =
      CacheConfig::getDefaultMaxEntrySize();

  CacheInfoType() { Cache->init(/*ReleaseToOsInterval=*/-1); }

  ~CacheInfoType() {
    if (Cache == nullptr) {
      return;
    }

    // Clean up MemMaps.
    for (auto &MemMap : MemMaps)
      MemMap.unmap();
  }

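  // Reserves a page-aligned region large enough for Size bytes and maps it,
  // returning the resulting MemMapT.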
  scudo::MemMapT allocate(scudo::uptr Size) {
    scudo::uptr MapSize = scudo::roundUp(Size, PageSize);
    scudo::ReservedMemoryT ReservedMemory;
    CHECK(ReservedMemory.create(0U, MapSize, nullptr, MAP_ALLOWNOMEM));

    scudo::MemMapT MemMap = ReservedMemory.dispatch(
        ReservedMemory.getBase(), ReservedMemory.getCapacity());
    MemMap.remap(MemMap.getBase(), MemMap.getCapacity(), "scudo:test",
                 MAP_RESIZABLE | MAP_ALLOWNOMEM);
    return MemMap;
  }

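  // Maps NumEntries blocks of Size bytes each and stores them all in the
  // cache, keeping the MemMapT handles alive in MemMaps.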
  void fillCacheWithSameSizeBlocks(scudo::uptr NumEntries, scudo::uptr Size) {
    for (scudo::uptr I = 0; I < NumEntries; I++) {
      MemMaps.emplace_back(allocate(Size));
      auto &MemMap = MemMaps[I];
      Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
                   MemMap.getBase(), MemMap);
    }
  }
};

TEST(ScudoSecondaryTest, AllocatorCacheEntryOrder) {
  CacheInfoType<TestCacheConfig> Info;
  using CacheConfig = CacheInfoType<TestCacheConfig>::CacheConfig;

  Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount,
                        CacheConfig::getEntriesArraySize());

  Info.fillCacheWithSameSizeBlocks(CacheConfig::getEntriesArraySize(),
                                   Info.TestAllocSize);

  // Retrieval order should be the inverse of insertion order.
  for (scudo::uptr I = CacheConfig::getEntriesArraySize(); I > 0; I--) {
    scudo::uptr EntryHeaderPos;
    scudo::CachedBlock Entry = Info.Cache->retrieve(
        0, Info.TestAllocSize, PageSize, 0, EntryHeaderPos);
    EXPECT_EQ(Entry.MemMap.getBase(), Info.MemMaps[I - 1].getBase());
  }
}

TEST(ScudoSecondaryTest, AllocatorCachePartialChunkHeuristicRetrievalTest) {
  CacheInfoType<TestCacheConfig> Info;

  const scudo::uptr FragmentedPages =
      1 + scudo::CachedBlock::MaxReleasedCachePages;
  scudo::uptr EntryHeaderPos;
  scudo::CachedBlock Entry;
  scudo::MemMapT MemMap = Info.allocate(PageSize + FragmentedPages * PageSize);
  Info.Cache->store(Info.Options, MemMap.getBase(), MemMap.getCapacity(),
                    MemMap.getBase(), MemMap);

  // FragmentedPages > MaxAllowedFragmentedPages, so a PageSize allocation
  // cannot be retrieved from the cache.
  Entry = Info.Cache->retrieve(/*MaxAllowedFragmentedPages=*/0, PageSize,
                               PageSize, 0, EntryHeaderPos);
  EXPECT_FALSE(Entry.isValid());

  // FragmentedPages == MaxAllowedFragmentedPages, so a PageSize allocation
  // can be retrieved from the cache.
  Entry = Info.Cache->retrieve(FragmentedPages, PageSize, PageSize, 0,
                               EntryHeaderPos);
  EXPECT_TRUE(Entry.isValid());

  MemMap.unmap();
}

TEST(ScudoSecondaryTest, AllocatorCacheMemoryLeakTest) {
  CacheInfoType<TestCacheConfig> Info;
  using CacheConfig = CacheInfoType<TestCacheConfig>::CacheConfig;

  // Fill the cache above MaxEntriesCount to force an eviction. The first
  // cache entry should be evicted (because it is the oldest) due to the
  // maximum number of entries being reached.
  Info.fillCacheWithSameSizeBlocks(CacheConfig::getDefaultMaxEntriesCount() + 1,
                                   Info.TestAllocSize);

  std::vector<scudo::CachedBlock> RetrievedEntries;

  // The first MemMap should be evicted from the cache because it was the
  // first one inserted.
  for (scudo::uptr I = CacheConfig::getDefaultMaxEntriesCount(); I > 0; I--) {
    scudo::uptr EntryHeaderPos;
    RetrievedEntries.push_back(Info.Cache->retrieve(
        0, Info.TestAllocSize, PageSize, 0, EntryHeaderPos));
    EXPECT_EQ(Info.MemMaps[I].getBase(),
              RetrievedEntries.back().MemMap.getBase());
  }

  // The evicted entry should be marked due to the unmap callback.
  EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[0].getBase()),
            UnmappedMarker);
}

TEST(ScudoSecondaryTest, AllocatorCacheOptions) {
  CacheInfoType<TestCacheConfig> Info;

  // Attempt to set a maximum number of entries higher than the array size.
  EXPECT_TRUE(
      Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));

  // Attempt to set an invalid (negative) number of entries.
  EXPECT_FALSE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, -1));

  // Various valid combinations.
  EXPECT_TRUE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
  EXPECT_TRUE(
      Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
  EXPECT_TRUE(Info.Cache->canCache(1UL << 18));
  EXPECT_TRUE(
      Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
  EXPECT_FALSE(Info.Cache->canCache(1UL << 18));
  EXPECT_TRUE(Info.Cache->canCache(1UL << 16));
  EXPECT_TRUE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
  EXPECT_FALSE(Info.Cache->canCache(1UL << 16));
  EXPECT_TRUE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
  EXPECT_TRUE(
      Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
  EXPECT_TRUE(Info.Cache->canCache(1UL << 16));
}