//===-- primary64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "memtag.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
// into Regions, one specific to each size class. Note that the base of that
// mapping is random (based on the platform-specific map() capabilities), and
// that each Region actually starts at a random offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
// they belong to. The Blocks created are shuffled to prevent predictable
// address patterns (the predictability increases with the size of the Blocks).
//
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size class
// freelist to the thread-specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.

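// A minimal sketch of a typical instantiation (the size class map and region
// size below are illustrative assumptions, not the defaults of any particular
// platform configuration):
//
//   // One 2^30 byte (1 GB) Region per size class of DefaultSizeClassMap.
//   typedef scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap,
//                                       /*RegionSizeLog=*/30U>
//       Primary;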
template <class SizeClassMapT, uptr RegionSizeLog,
          s32 MinReleaseToOsIntervalMs = INT32_MIN,
          s32 MaxReleaseToOsIntervalMs = INT32_MAX,
          bool MaySupportMemoryTagging = false>
class SizeClassAllocator64 {
public:
  typedef SizeClassMapT SizeClassMap;
  typedef SizeClassAllocator64<
      SizeClassMap, RegionSizeLog, MinReleaseToOsIntervalMs,
      MaxReleaseToOsIntervalMs, MaySupportMemoryTagging>
      ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;
  static const bool SupportsMemoryTagging =
      MaySupportMemoryTagging && archSupportsMemoryTagging();

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    // Reserve the space required for the Primary.
    PrimaryBase = reinterpret_cast<uptr>(
        map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));

    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      // The actual start of a region is offset by a random number of pages
      // (here, 1 to 16).
      Region->RegionBeg =
          getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
      Region->RandState = getRandomU32(&Seed);
      // Releasing smaller size classes doesn't necessarily yield a meaningful
      // RSS impact: there are more blocks per page, and they are randomized
      // around, so pages are less likely to be entirely empty. On top of
      // this, attempting to release those requires more iterations and memory
      // accesses, which ends up being fairly costly. The current lower limit
      // is mostly arbitrary and based on empirical observations.
      // TODO(kostyak): make the lower limit a runtime option
      Region->CanRelease = (I != SizeClassMap::BatchClassId) &&
                           (getSizeByClassId(I) >= (PageSize / 32));
      if (Region->CanRelease)
        Region->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

    if (SupportsMemoryTagging)
      UseMemoryTagging = systemSupportsMemoryTagging();
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
  }

  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    TransferBatch *B = Region->FreeList.front();
    if (B) {
      Region->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Region);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Region->Stats.PoppedBlocks += B->getCount();
    return B;
  }
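
  // A rough sketch of how a thread-local cache drives these two entry points
  // (the surrounding cache logic lives in local_cache.h; this is illustrative,
  // not the exact cache code):
  //   TransferBatch *B = Allocator->popBatch(&Cache, ClassId); // cache refill
  //   // ...hand out up to B->getCount() blocks to the user...
  //   Allocator->pushBatch(ClassId, B);                        // cache drain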

  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_GT(B->getCount(), 0);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    Region->FreeList.push_front(B);
    Region->Stats.PushedBlocks += B->getCount();
    if (Region->CanRelease)
      releaseToOSMaybe(Region, ClassId);
  }

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
  }

  void enable() {
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      const RegionInfo *Region = getRegionInfo(I);
      const uptr BlockSize = getSizeByClassId(I);
      const uptr From = Region->RegionBeg;
      const uptr To = From + Region->AllocatedUser;
      for (uptr Block = From; Block < To; Block += BlockSize)
        Callback(Block);
    }
  }
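
  // Example: walk every block carved out of the Primary so far. Blocks are
  // visited whether currently allocated or sitting in a freelist:
  //   Allocator.iterateOverBlocks([](uptr Block) {
  //     // Block is the address of a BlockSize-sized chunk.
  //   });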

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      if (Region->MappedUser)
        TotalMapped += Region->MappedUser;
      PoppedBlocks += Region->Stats.PoppedBlocks;
      PushedBlocks += Region->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
                "allocations; remains %zu\n",
                TotalMapped >> 20, 0, PoppedBlocks,
                PoppedBlocks - PushedBlocks);

    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value), MaxReleaseToOsIntervalMs),
              MinReleaseToOsIntervalMs);
      atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }
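
  // The interval above is clamped to the template bounds. With the default
  // bounds (INT32_MIN/INT32_MAX) any value is stored as passed, while a
  // configuration built with, say, MaxReleaseToOsIntervalMs = 1000 (an
  // illustrative value) would clamp setOption(Option::ReleaseInterval, 5000)
  // down to a stored 1000ms.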

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  bool useMemoryTagging() const {
    return SupportsMemoryTagging && UseMemoryTagging;
  }
  void disableMemoryTagging() { UseMemoryTagging = false; }

private:
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr PrimarySize = RegionSize * NumClasses;

  // Call map for user memory with at least this size (1 << 18 == 256 KB).
  static const uptr MapSizeIncrement = 1UL << 18;
  // Fill at most this number of batches from the newly mapped memory.
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;

  struct RegionStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
    HybridMutex Mutex;
    SinglyLinkedList<TransferBatch> FreeList;
    RegionStats Stats;
    bool CanRelease;
    bool Exhausted;
    u32 RandState;
    uptr RegionBeg;
    uptr MappedUser;    // Bytes mapped for user memory.
    uptr AllocatedUser; // Bytes allocated for user memory.
    MapPlatformData Data;
    ReleaseToOsInfo ReleaseInfo;
  };
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr PrimaryBase;
  MapPlatformData Data;
  atomic_s32 ReleaseToOsIntervalMs;
  bool UseMemoryTagging;
  RegionInfo RegionInfoArray[NumClasses];

  RegionInfo *getRegionInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &RegionInfoArray[ClassId];
  }

  uptr getRegionBaseByClassId(uptr ClassId) const {
    return PrimaryBase + (ClassId << RegionSizeLog);
  }
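
  // Regions are laid out back to back, so the base of the Region for a class
  // is a simple shift-and-add. For example, with an (assumed) RegionSizeLog of
  // 30, each Region spans 1 GB and class 3 starts at PrimaryBase + 3 GB; the
  // usable RegionBeg then sits 1 to 16 pages past that base (see
  // initLinkerInitialized).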

  bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
                       TransferBatch **CurrentBatch, u32 MaxCount,
                       void **PointersArray, u32 Count) {
    // No need to shuffle the blocks of the batch size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(PointersArray, Count, &Region->RandState);
    TransferBatch *B = *CurrentBatch;
    for (uptr I = 0; I < Count; I++) {
      if (B && B->getCount() == MaxCount) {
        Region->FreeList.push_back(B);
        B = nullptr;
      }
      if (!B) {
        B = C->createBatch(ClassId, PointersArray[I]);
        if (UNLIKELY(!B))
          return false;
        B->clear();
      }
      B->add(PointersArray[I]);
    }
    *CurrentBatch = B;
    return true;
  }

  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           RegionInfo *Region) {
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);

    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MappedUser;
    const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
    if (TotalUserBytes > MappedUser) {
      // Do the mmap for the user memory.
      const uptr UserMapSize =
          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
        if (!Region->Exhausted) {
          Region->Exhausted = true;
          ScopedString Str(1024);
          getStats(&Str);
          Str.append(
              "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
              RegionSize >> 20, Size);
          Str.output();
        }
        return nullptr;
      }
      if (UNLIKELY(MappedUser == 0))
        Region->Data = Data;
      if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                        UserMapSize, "scudo:primary",
                        MAP_ALLOWNOMEM | MAP_RESIZABLE |
                            (useMemoryTagging() ? MAP_MEMTAG : 0),
                        &Region->Data)))
        return nullptr;
      Region->MappedUser += UserMapSize;
      C->getStats().add(StatMapped, UserMapSize);
    }

    const u32 NumberOfBlocks = Min(
        MaxNumBatches * MaxCount,
        static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
    DCHECK_GT(NumberOfBlocks, 0);

    TransferBatch *B = nullptr;
    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    void *ShuffleArray[ShuffleArraySize];
    u32 Count = 0;
    const uptr P = RegionBeg + Region->AllocatedUser;
    const uptr AllocatedUser = Size * NumberOfBlocks;
    for (uptr I = P; I < P + AllocatedUser; I += Size) {
      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
      if (Count == ShuffleArraySize) {
        if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
                                      ShuffleArray, Count)))
          return nullptr;
        Count = 0;
      }
    }
    if (Count) {
      if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
                                    ShuffleArray, Count)))
        return nullptr;
    }
    DCHECK(B);
    if (!Region->FreeList.empty()) {
      Region->FreeList.push_back(B);
      B = Region->FreeList.front();
      Region->FreeList.pop_front();
    }
    DCHECK_GT(B->getCount(), 0);

    C->getStats().add(StatFree, AllocatedUser);
    Region->AllocatedUser += AllocatedUser;
    Region->Exhausted = false;

    return B;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    RegionInfo *Region = getRegionInfo(ClassId);
    if (Region->MappedUser == 0)
      return;
    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
    const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
                "released: %6zuK region: 0x%zx (0x%zx)\n",
                Region->Exhausted ? "F" : " ", ClassId,
                getSizeByClassId(ClassId), Region->MappedUser >> 10,
                Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
                TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
                Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
                getRegionBaseByClassId(ClassId));
  }

  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Region->AllocatedUser -
        (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed = (Region->Stats.PushedBlocks -
                              Region->ReleaseInfo.PushedBlocksAtLastRelease) *
                             BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (BlockSize < PageSize / 16U) {
      if (!Force && BytesPushed < Region->AllocatedUser / 16U)
        return 0;
      // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
      if ((BytesInFreeList * 100U) / Region->AllocatedUser <
          (100U - 1U - BlockSize / 16U))
        return 0;
    }
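    // As a worked example (assuming a 4096-byte page, so blocks under 256
    // bytes take this path): for BlockSize == 32 the threshold is
    // 100 - 1 - 32 / 16 == 97% of AllocatedUser free; for BlockSize == 240 it
    // is 100 - 1 - 240 / 16 == 84%.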

    if (!Force) {
      const s32 IntervalMs =
          atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
      if (IntervalMs < 0)
        return 0;
      if (Region->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
    releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
                          Region->AllocatedUser, 1U, BlockSize, &Recorder,
                          SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Region->ReleaseInfo.PushedBlocksAtLastRelease =
          Region->Stats.PushedBlocks;
      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return Recorder.getReleasedBytes();
  }
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_