//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"
#include "vector.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
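//
// Roughly, a block handed out by this allocator is laid out as follows (a
// sketch based on allocate() below; the guard pages are only mapped when
// Config::getEnableGuardPages() is true):
//
//   [guard page][slack][LargeBlock::Header][Combined header][user data][guard page]
//
// with the pointer returned by allocate() landing right after the
// LargeBlock::Header.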

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(MemMapT &MemMap) { MemMap.unmap(); }

namespace {

struct CachedBlock {
  static constexpr u16 CacheIndexMax = UINT16_MAX;
  static constexpr u16 EndOfListVal = CacheIndexMax;

  // We allow a certain amount of fragmentation, and part of the fragmented
  // bytes will be released by `releaseAndZeroPagesToOS()`. This increases the
  // cache hit rate and reduces the RSS overhead at the same time. See more
  // details in the `MapAllocatorCache::retrieve()` section.
  //
  // We arrived at this default value after noticing that mapping in larger
  // memory regions performs better than releasing memory and forcing a cache
  // hit. The data suggests that beyond 4 pages, the release execution time is
  // longer than the map execution time. As such, the default is platform
  // dependent.
  static constexpr uptr MaxReleasedCachePages = 4U;

  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;
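  // Next/Prev link this entry into the cache's lists. They are u16 indices
  // into the cache's entry array (hence CacheIndexMax and the static_assert in
  // MapAllocatorCache below) rather than raw pointers.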
  u16 Next = 0;
  u16 Prev = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size,
                       UNUSED uptr Alignment, UNUSED uptr HeadersSize,
                       UNUSED uptr &EntryHeaderPos) {
    return {};
  }
  void store(UNUSED Options Options, UNUSED uptr CommitBase,
             UNUSED uptr CommitSize, UNUSED uptr BlockBegin,
             UNUSED MemMapT MemMap) {
    // This should never be called since canCache always returns false.
    UNREACHABLE(
        "It is not valid to call store on MapAllocatorNoCache objects.");
  }

  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

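// Number of pages at the start of a cached block that are tolerated as
// fragmented (i.e. left committed) on retrieval before any excess is released
// back to the OS; mapSecondary() also uses it when sizing the MAP_MEMTAG
// prefix of large tagged mappings.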
static const uptr MaxUnreleasedCachePages = 4U;

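// Remaps the committed portion of a secondary block. When memory tagging is in
// use, the mapping is split so that only the leading part (up to UntaggedPos
// below) is mapped with MAP_MEMTAG while the remainder stays untagged; on
// Trusty the split must land on a page boundary so that AllocPos remains
// usable for shared memory (see the comment in the Trusty branch).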
template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

  const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) &&
      CommitSize > MaxUnreleasedCacheBytes) {
    const uptr UntaggedPos =
        Max(AllocPos, CommitBase + MaxUnreleasedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

// The default unmap callback is simply scudo::unmap. In testing, a different
// unmap callback is used to record information about unmaps in the cache.
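//
// The cache is parameterized by a Config type that is expected to provide (at
// least) the entry points referenced below: getEntriesArraySize(),
// getDefaultMaxEntriesCount(), getDefaultMaxEntrySize(),
// getDefaultReleaseToOsIntervalMs(), getMinReleaseToOsIntervalMs(),
// getMaxReleaseToOsIntervalMs() and getQuarantineSize().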
template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %zu, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

    for (CachedBlock &Entry : LRUEntries) {
      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");
  // Ensure the cache entry array size fits in the LRU list Next and Prev
  // index fields.
  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
                "Cache entry array is too large to be indexed.");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(LRUEntries.size(), 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // The default value in the cache config takes priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

    LRUEntries.clear();
    LRUEntries.init(Entries, sizeof(Entries));

    AvailEntries.clear();
    AvailEntries.init(Entries, sizeof(Entries));
    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
      AvailEntries.push_back(&Entries[I]);
  }

  void store(const Options &Options, uptr CommitBase, uptr CommitSize,
             uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
    DCHECK(canCache(CommitSize));

    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    u64 Time;
    CachedBlock Entry;

    Entry.CommitBase = CommitBase;
    Entry.CommitSize = CommitSize;
    Entry.BlockBegin = BlockBegin;
    Entry.MemMap = MemMap;
    Entry.Time = UINT64_MAX;

    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    }

    // Usually only one entry will be evicted from the cache. Only in the rare
    // event that the cache shrinks at run time, due to a decrease of the
    // configurable value MaxEntriesCount, will more than one cache entry be
    // evicted. The vector is used to save the MemMaps of evicted entries so
    // that the unmap calls can be performed outside of the lock.
    Vector<MemMapT, 1U> EvictionMemMaps;

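    // The do {} while (0) gives the critical section below a scope of its own:
    // `break` is used to bail out early (see the memory tagging check), and
    // leaving the scope drops the ScopedLock so that the eviction unmaps can
    // run without holding the Mutex.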
    do {
      ScopedLock L(Mutex);

      // Time must be computed under the lock to ensure
      // that the LRU cache remains sorted with respect to
      // time in a multithreaded environment.
      Time = getMonotonicTimeFast();
      if (Entry.Time != 0)
        Entry.Time = Time;

      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        unmapCallBack(Entry.MemMap);
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }

      // All excess entries are evicted from the cache. Note that when
      // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's
      // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we
      // won't try to pop `LRUEntries` when it's empty.
      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
        // Save MemMaps of evicted entries to perform unmap outside of lock.
        CachedBlock *Entry = LRUEntries.back();
        EvictionMemMaps.push_back(Entry->MemMap);
        remove(Entry);
      }

      insert(Entry);

      if (OldestTime == 0)
        OldestTime = Entry.Time;
    } while (0);

    for (MemMapT &EvictMemMap : EvictionMemMaps)
      unmapCallBack(EvictMemMap);

    if (Interval >= 0) {
      // TODO: Add ReleaseToOS logic to LRU algorithm
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    }
  }

  CachedBlock retrieve(uptr MaxAllowedFragmentedPages, uptr Size,
                       uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
      EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    CachedBlock Entry;
    EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (LRUEntries.size() == 0)
        return {};
      CachedBlock *RetrievedEntry = nullptr;
      uptr MinDiff = UINTPTR_MAX;

      //  Since allocation sizes don't always match cached memory chunk sizes
      //  we allow some memory to be unused (called fragmented bytes). The
      //  amount of unused bytes is exactly EntryHeaderPos - CommitBase.
      //
      //        CommitBase                CommitBase + CommitSize
      //          V                              V
      //      +---+------------+-----------------+---+
      //      |   |            |                 |   |
      //      +---+------------+-----------------+---+
      //      ^                ^                     ^
      //    Guard         EntryHeaderPos          Guard-page-end
      //    page-begin
      //
      //  [EntryHeaderPos, CommitBase + CommitSize) contains the user data as
      //  well as the header metadata. If EntryHeaderPos - CommitBase exceeds
      //  MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
      //  not considered valid for retrieval.
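      //
      //  As an illustration (example numbers only): with 4 KiB pages and the
      //  non-MTE default of MaxAllowedFragmentedPages == 8 (see
      //  tryAllocateFromCache()), an entry whose AllocPos would land more than
      //  32 KiB past CommitBase is skipped.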
      for (CachedBlock &Entry : LRUEntries) {
        const uptr CommitBase = Entry.CommitBase;
        const uptr CommitSize = Entry.CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        const uptr MaxAllowedFragmentedBytes =
            MaxAllowedFragmentedPages * PageSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        // TODO: Remove AllocPos > CommitBase + MaxAllowedFragmentedBytes
        // and replace with Diff > MaxAllowedFragmentedBytes
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + MaxAllowedFragmentedBytes) {
          continue;
        }

        const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;

        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff >= MinDiff)
          continue;

        MinDiff = Diff;
        RetrievedEntry = &Entry;
        EntryHeaderPos = HeaderPos;

        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr OptimalFitThresholdBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= OptimalFitThresholdBytes)
          break;
      }

      if (RetrievedEntry != nullptr) {
        Entry = *RetrievedEntry;
        remove(RetrievedEntry);
        SuccessfulRetrieves++;
      }
    }

    //  The difference between the retrieved memory chunk and the requested
    //  size is at most MaxAllowedFragmentedPages pages.
    //
    // +- MaxAllowedFragmentedPages * PageSize -+
    // +--------------------------+-------------+
    // |                          |             |
    // +--------------------------+-------------+
    //  \ Bytes to be released   /        ^
    //                                    |
    //                           (may or may not be committed)
    //
    //   The maximum number of bytes released to the OS is capped by
    //   MaxReleasedCachePages.
    //
    //   TODO: Consider making MaxReleasedCachePages configurable since
    //   the release to OS API can vary across systems.
    if (Entry.Time != 0) {
      const uptr FragmentedBytes =
          roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
      const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
      if (FragmentedBytes > MaxUnreleasedCacheBytes) {
        const uptr MaxReleasedCacheBytes =
            CachedBlock::MaxReleasedCachePages * PageSize;
        uptr BytesToRelease =
            roundUp(Min<uptr>(MaxReleasedCacheBytes,
                              FragmentedBytes - MaxUnreleasedCacheBytes),
                    PageSize);
        Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
      }
    }

    return Entry;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      if (Value < 0)
        return false;
      atomic_store_relaxed(
          &MaxEntriesCount,
          Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize()));
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        unmapCallBack(MemMap);
        Quarantine[I].invalidate();
      }
    }
    for (CachedBlock &Entry : LRUEntries)
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
    CachedBlock *AvailEntry = AvailEntries.front();
    AvailEntries.pop_front();

    *AvailEntry = Entry;
    LRUEntries.push_front(AvailEntry);
  }

  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
    DCHECK(Entry->isValid());
    LRUEntries.remove(Entry);
    Entry->invalidate();
    AvailEntries.push_front(Entry);
  }

  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);

      for (CachedBlock &Entry : LRUEntries)
        MapInfo[N++] = Entry.MemMap;
      LRUEntries.clear();
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      unmapCallBack(MemMap);
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};

  // Cached blocks stored in LRU order
  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
  // The unused Entries
  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
};

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
                             uptr *BlockEndPtr, FillContentsMode FillContents);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static uptr getGuardPageSize() {
    if (Config::getEnableGuardPages())
      return getPageSizeCached();
    return 0U;
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};
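//
// Illustrative use by a frontend (a sketch only; `SomeConfig`, `GStats`,
// `Opts`, etc. are placeholders for whatever the combined allocator supplies):
//
//   MapAllocator<SomeConfig> Secondary;
//   Secondary.init(&GStats, /*ReleaseToOsInterval=*/1000);
//   uptr BlockEnd;
//   void *P = Secondary.allocate(Opts, Size, Alignment, &BlockEnd);
//   ...
//   Secondary.deallocate(Opts, P);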

template <typename Config>
void *
MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
                                           uptr Alignment, uptr *BlockEndPtr,
                                           FillContentsMode FillContents) {
  CachedBlock Entry;
  uptr EntryHeaderPos;
  uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages;

  if (LIKELY(!useMemoryTagging<Config>(Options))) {
    MaxAllowedFragmentedPages += CachedBlock::MaxReleasedCachePages;
  } else {
    // TODO: Enabling MaxReleasedCachePages may result in pages of an entry
    // being partially released, and it erases the tags of those pages as well.
    // To support this feature for MTE, we need to tag those pages again.
    DCHECK_EQ(MaxAllowedFragmentedPages, MaxUnreleasedCachePages);
  }

  Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment,
                         getHeadersSize(), EntryHeaderPos);
  if (!Entry.isValid())
    return nullptr;

  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
  bool Zeroed = Entry.Time == 0;
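  // With memory tagging enabled, restore the tags on the reused block: a
  // released (zeroed) entry is retagged from its commit base, otherwise only
  // the span between the previous BlockBegin and the new one needs adjusting.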
  if (useMemoryTagging<Config>(Options)) {
    uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    if (Zeroed) {
      storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                NewBlockBegin);
    } else if (Entry.BlockBegin < NewBlockBegin) {
      storeTags(Entry.BlockBegin, NewBlockBegin);
    } else {
      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
    }
  }

  H->CommitBase = Entry.CommitBase;
  H->CommitSize = Entry.CommitSize;
  H->MemMap = Entry.MemMap;

  const uptr BlockEnd = H->CommitBase + H->CommitSize;
  if (BlockEndPtr)
    *BlockEndPtr = BlockEnd;
  uptr HInt = reinterpret_cast<uptr>(H);
  if (allocatorSupportsMemoryTagging<Config>())
    HInt = untagPointer(HInt);
  const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
  void *Ptr = reinterpret_cast<void *>(PtrInt);
  if (FillContents && !Zeroed)
    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
           BlockEnd - PtrInt);
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += H->CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, H->CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return Ptr;
}
// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we simply
  // pass the required size (`Size` + `getHeadersSize()`) to do the cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
                                     FillContents);
    if (Ptr != nullptr)
      return Ptr;
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (UNLIKELY(Alignment > PageSize))
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * getGuardPageSize();
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + getGuardPageSize();
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase =
        roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U) {
      const uptr NewMapBase = CommitBase - getGuardPageSize();
      DCHECK_GE(NewMapBase, MapBase);
      if (NewMapBase != MapBase) {
        MemMap.unmap(MapBase, NewMapBase - MapBase);
        MapBase = NewMapBase;
      }
      // CommitBase is past the first guard page, but this computation needs
      // to include a page where the header lives.
      const uptr NewMapEnd =
          CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
      DCHECK_LE(NewMapEnd, MapEnd);
      if (NewMapEnd != MapEnd) {
        MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
        MapEnd = NewMapEnd;
      }
    }
  }

  const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    unmap(MemMap);
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  // Make sure that the header is not in the guard page or before the base.
  DCHECK_GE(HeaderPos, MapBase + getGuardPageSize());
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }

  if (Cache.canCache(H->CommitSize)) {
    Cache.store(Options, H->CommitBase, H->CommitSize,
                reinterpret_cast<uptr>(H + 1), H->MemMap);
  } else {
    // Note that `H->MemMap` is stored on the pages it manages itself. Take over
    // the ownership before unmap() so that nothing accessed alongside unmap()
    // touches the inaccessible pages.
    MemMapT MemMap = H->MemMap;
    unmap(MemMap);
  }
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_