//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog-byte
// boundary, and keeps a bytemap of the mappable address space to track the
// size class each region is associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches, which
// are used to transfer arrays of pointers from the global size class freelist
// to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
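// The Config type is expected to provide, at a minimum, the members referenced
// below (PrimaryCompactPtrT, SizeClassMap, PrimaryGroupSizeLog,
// PrimaryRegionSizeLog and the release-to-OS interval bounds). A minimal
// sketch, for illustration only (the size class map name and the exact values
// are assumptions, not a prescribed configuration):
//
//   struct ExampleConfig {
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef uptr PrimaryCompactPtrT;
//     // Region and group sizes must match, see allocateRegionSlow().
//     static const uptr PrimaryRegionSizeLog = 18U;
//     static const uptr PrimaryGroupSizeLog = 18U;
//     static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
//     static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
//   };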
template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::PrimaryCompactPtrT CompactPtrT;
  typedef typename Config::SizeClassMap SizeClassMap;
  static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;
  typedef typename CacheT::BatchGroup BatchGroup;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    {
      ScopedLock L(RegionsStashMutex);
      while (NumberOfStashedRegions > 0) {
        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
              RegionSize);
      }
    }

    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
      *Sci = {};
    }

    ScopedLock L(ByteMapMutex);
    for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }

  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
  }

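  // With this primary, compact pointers are plain addresses (see compactPtr
  // above), so the group base of a block is simply its address rounded down
  // to the group size of 2^GroupSizeLog bytes.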
  uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
    return CompactPtr & ~Mask;
  }

  uptr decompactGroupBase(uptr CompactPtrGroupBase) {
    return CompactPtrGroupBase;
  }

  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = popBatchImpl(C, ClassId, Sci);
    if (UNLIKELY(!B)) {
      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
        return nullptr;
      B = popBatchImpl(C, ClassId, Sci);
      // If `populateFreeList` succeeded, we are supposed to get free blocks.
      DCHECK_NE(B, nullptr);
    }
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Sci->Mutex);
      // Constructing a batch group in the free list will use two blocks in
      // BatchClassId. If we are pushing BatchClassId blocks, we will use the
      // blocks in the array directly (we can't delegate to the local cache,
      // as that would cause a recursive allocation). However, the number of
      // free blocks may be less than two. Therefore, populate the free list
      // before inserting the blocks.
      if (Size == 1 && !populateFreeList(C, ClassId, Sci))
        return;
      pushBlocksImpl(C, ClassId, Sci, Array, Size);
      Sci->Stats.PushedBlocks += Size;
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    // Sort the blocks (a simple insertion sort keyed on the group base) so
    // that blocks belonging to the same group can be pushed together.
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 &&
             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Sci->Mutex);
    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);

    Sci->Stats.PushedBlocks += Size;
    if (ClassId != SizeClassMap::BatchClassId)
      releaseToOSMaybe(Sci, ClassId);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    ByteMapMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    ByteMapMutex.unlock();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      // TODO: The call of `iterateOverBlocks` requires disabling
      // SizeClassAllocator32. We may consider locking each region on demand
      // only.
      Sci->Mutex.assertHeld();
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }

    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
    ByteMapMutex.assertHeld();

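    // PossibleRegions stores ClassId + 1 for regions in use (0 means unused),
    // so subtract 1 to recover the class id and skip the regions that belong
    // to the batch class.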
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
    }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getStats(Str, I, Sci, 0);
    }
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
          Config::PrimaryMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr releaseToOS(ReleaseToOS ReleaseType) {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr BytesInFreeListAtLastCheckpoint;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
    uptr CurrentRegion GUARDED_BY(Mutex);
    uptr CurrentRegionAllocated GUARDED_BY(Mutex);
    SizeClassStats Stats GUARDED_BY(Mutex);
    u32 RandState;
    uptr AllocatedUser GUARDED_BY(Mutex);
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex GUARDED_BY(Mutex);
    uptr MaxRegionIndex GUARDED_BY(Mutex);
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }

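  // Maps 2 * RegionSize so that a RegionSize-aligned region can always be
  // carved out of the mapping. If the mapping happens to be aligned already,
  // the second region is stashed (space permitting) for a future
  // allocateRegion() call instead of being unmapped.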
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUp(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);

    DCHECK_EQ(Region % RegionSize, 0U);
    static_assert(Config::PrimaryRegionSizeLog == GroupSizeLog,
                  "Memory group should be the same size as Region");

    return Region;
  }

  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      ScopedLock L(ByteMapMutex);
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

  // Push the blocks to their batch group. The layout will be like,
  //
  // FreeList -> BG -> BG -> BG
  //             |     |     |
  //             v     v     v
  //             TB    TB    TB
  //             |
  //             v
  //             TB
  //
  // Each BatchGroup (BG) is associated with a unique group id, and its free
  // blocks are managed by a list of TransferBatches (TB). To reduce the cost
  // of inserting blocks, the BGs are kept sorted and the input `Array` is
  // expected to be sorted as well, which makes maintaining the sorted order
  // cheaper. Use `SameGroup=true` to indicate that all blocks in the array
  // are from the same group, in which case we skip checking the group id of
  // each block.
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Sci->Mutex) {
    DCHECK_GT(Size, 0U);

    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroup *BG = nullptr;
      TransferBatch *TB = nullptr;
      if (ClassId == SizeClassMap::BatchClassId) {
        DCHECK_GE(Size, 2U);

        // Free blocks are recorded by TransferBatches in the freelist, and
        // that includes blocks of BatchClassId. In order not to use additional
        // memory to record blocks of BatchClassId, they are self-contained,
        // i.e., a TransferBatch may record the address of the block it lives
        // in. See the figure below:
        //
        // TransferBatch at 0xABCD
        // +----------------------------+
        // | Free blocks' addr          |
        // | +------+------+------+     |
        // | |0xABCD|...   |...   |     |
        // | +------+------+------+     |
        // +----------------------------+
        //
        // The safety of manipulating a TransferBatch rests on the invariant:
        //
        //   The unit of each pop-block request is a whole TransferBatch.
        //   Returning only part of the blocks in a TransferBatch is not
        //   allowed.
        //
        // This ensures that a TransferBatch won't give away its own address
        // while it's still holding other valid data.
        //
        // Besides, a BatchGroup uses the same size class as a TransferBatch,
        // and its address is recorded in a TransferBatch too. To keep this
        // safe, the invariant to maintain is:
        //
        //   The BatchGroup's own address is always recorded in the last
        //   TransferBatch of the freelist (which also implies that the
        //   freelist should only be updated with push_front). Once the last
        //   TransferBatch is popped, the BatchGroup becomes invalid.
        //
        // As a result, the blocks used by the BatchGroup and TransferBatch are
        // reusable and need no additional space.
        BG = reinterpret_cast<BatchGroup *>(
            decompactPtr(ClassId, Array[Size - 1]));
        BG->Batches.clear();

        TB = reinterpret_cast<TransferBatch *>(
            decompactPtr(ClassId, Array[Size - 2]));
        TB->clear();

        // Append the blocks used by the BatchGroup and TransferBatch
        // immediately so that we ensure they end up in the last TransferBatch.
        TB->appendFromArray(Array + Size - 2, 2);
        Size -= 2;
      } else {
        BG = C->createGroup();
        BG->Batches.clear();

        TB = C->createBatch(ClassId, nullptr);
        TB->clear();
      }

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      // TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          TransferBatch::getMaxCached(getSizeByClassId(ClassId));

      return BG;
    };

    auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
      TransferBatch *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch = C->createBatch(
              ClassId,
              reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    BatchGroup *Cur = Sci->FreeList.front();

    if (ClassId == SizeClassMap::BatchClassId) {
      if (Cur == nullptr) {
        // Don't need to classify BatchClassId.
        Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
        Sci->FreeList.push_front(Cur);
      }
      InsertBlocks(Cur, Array, Size);
      return;
    }

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroup *Prev = nullptr;

    while (Cur != nullptr &&
           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr ||
        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
      if (Prev == nullptr)
        Sci->FreeList.push_front(Cur);
      else
        Sci->FreeList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // id.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine each segment of blocks that
    // share a group and push them to their group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr &&
               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Sci->FreeList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }

  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
  // group id will be considered first (the free list is kept sorted by group
  // id, so this is simply the front of the list).
  //
  // The region mutex needs to be held while calling this method.
  TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    if (Sci->FreeList.empty())
      return nullptr;

    SinglyLinkedList<TransferBatch> &Batches = Sci->FreeList.front()->Batches;
    DCHECK(!Batches.empty());

    TransferBatch *B = Batches.front();
    Batches.pop_front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    if (Batches.empty()) {
      BatchGroup *BG = Sci->FreeList.front();
      Sci->FreeList.pop_front();

      // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
      // while allocating. Note that the block used to construct the BatchGroup
      // is recorded as a free block in the last element of BatchGroup::Batches.
      // This means that once we pop the last TransferBatch, the block is
      // implicitly deallocated.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, BG);
    }

    return B;
  }

  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    uptr Region;
    uptr Offset;
    // If the size-class currently has a region associated to it, use it. The
    // newly created blocks will be located after the currently allocated memory
    // for that region (up to RegionSize). Otherwise, create a new region, where
    // the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return false;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lower of the two). This
    // will not be 0 as we ensure that a region can at least hold one block
    // (via the static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);

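    // The blocks in ShuffleArray are in increasing address order, so runs of
    // blocks sharing a group base are contiguous. Shuffle and push each run
    // separately so that every pushBlocksImpl() call stays within one group.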
    if (ClassId != SizeClassMap::BatchClassId) {
      u32 N = 1;
      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
        } else {
          ++N;
        }
      }

      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      pushBlocksImpl(C, ClassId, Sci, ShuffleArray, NumberOfBlocks,
                     /*SameGroup=*/true);
    }

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the region currently associated to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return true;
  }

  void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci, uptr Rss)
      REQUIRES(Sci->Mutex) {
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
                AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
  }

  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
      REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
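    // The bytes in the free list are the bytes handed to this size class
    // (AllocatedUser) minus the bytes currently held by users, i.e. the
    // difference between popped and pushed blocks times the block size.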
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;

    if (UNLIKELY(BytesInFreeList == 0))
      return 0;

    bool MaySkip = false;

    if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      MaySkip = true;
    }

    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, the
    // following is the region usage,
    //
    //  BytesInFreeListAtLastCheckpoint   AllocatedUser
    //                v                         v
    //  |--------------------------------------->
    //         ^                   ^
    //  BytesInFreeList     ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do a page release. If
    // we don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may wait longer for enough freed
    // blocks because we miss counting the bytes between `BytesInFreeList` and
    // `BytesInFreeListAtLastCheckpoint`.
    const uptr PushedBytesDelta =
        BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    if (PushedBytesDelta < PageSize)
      MaySkip = true;

    const bool CheckDensity =
        BlockSize < PageSize / 16U && ReleaseType != ReleaseToOS::ForceAll;
    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (CheckDensity) {
      if (ReleaseType == ReleaseToOS::Normal &&
          PushedBytesDelta < Sci->AllocatedUser / 16U) {
        MaySkip = true;
      }
    }

    if (MaySkip && ReleaseType != ReleaseToOS::ForceAll)
      return 0;

    if (ReleaseType == ReleaseToOS::Normal) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return 0;
      if (Sci->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTimeFast()) {
        return 0; // Memory was returned recently.
      }
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    const uptr GroupSize = (1U << GroupSizeLog);
    const uptr CurGroupBase =
        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

    ReleaseRecorder Recorder(Base);
    PageReleaseContext Context(BlockSize, NumberOfRegions,
                               /*ReleaseSize=*/RegionSize);

    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    for (BatchGroup &BG : Sci->FreeList) {
      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
      // `GroupSize` may not be divisible by `BlockSize`, which means there may
      // be unused space at the end of the Region. Exclude that space to avoid
      // an unused page map entry.
      uptr AllocatedGroupSize = GroupBase == CurGroupBase
                                    ? Sci->CurrentRegionAllocated
                                    : roundDownSlow(GroupSize, BlockSize);
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all of its slots used.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (ReleaseType != ReleaseToOS::ForceAll &&
          BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
        BG.BytesInBGAtLastCheckpoint = BytesInBG;
        continue;
      }
      const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
      if (PushedBytesDelta < PageSize)
        continue;

      // Given the randomness property, we try to release the pages only if the
      // bytes used by free blocks exceed a certain proportion of the allocated
      // space.
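      // That is, skip this group unless the free bytes cover at least
      // (100 - 1 - BlockSize / 16) percent of the group's allocated bytes.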
      if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
                              (100U - 1U - BlockSize / 16U)) {
        continue;
      }

      // TODO: Consider updating this after page release if `ReleaseRecorder`
      // can tell the released bytes in each group.
      BG.BytesInBGAtLastCheckpoint = BytesInBG;

      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      const uptr RegionIndex = (GroupBase - Base) / RegionSize;

      if (NumBlocks == MaxContainedBlocks) {
        for (const auto &It : BG.Batches)
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

        const uptr To = GroupBase + AllocatedGroupSize;
        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
                                      AllocatedGroupSize);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);

        // Note that we don't always visit blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
                                       RegionIndex, AllocatedGroupSize,
                                       /*MayContainLastBlockInRegion=*/true);
      }

      // We may not be able to do the page release in the rare case that the
      // PageMap allocation fails.
      if (UNLIKELY(!Context.hasBlockMarked()))
        return 0;
    }

    if (!Context.hasBlockMarked())
      return 0;

    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    return TotalReleasedBytes;
  }

  SizeClassInfo SizeClassInfoArray[NumClasses] = {};

  HybridMutex ByteMapMutex;
  // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
  ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
  uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_