1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_COMBINED_H_
10 #define SCUDO_COMBINED_H_
11 
12 #include "allocator_config_wrapper.h"
13 #include "atomic_helpers.h"
14 #include "chunk.h"
15 #include "common.h"
16 #include "flags.h"
17 #include "flags_parser.h"
18 #include "local_cache.h"
19 #include "mem_map.h"
20 #include "memtag.h"
21 #include "mutex.h"
22 #include "options.h"
23 #include "quarantine.h"
24 #include "report.h"
25 #include "secondary.h"
26 #include "stack_depot.h"
27 #include "string_utils.h"
28 #include "tsd.h"
29 
30 #include "scudo/interface.h"
31 
32 #ifdef GWP_ASAN_HOOKS
33 #include "gwp_asan/guarded_pool_allocator.h"
34 #include "gwp_asan/optional/backtrace.h"
35 #include "gwp_asan/optional/segv_handler.h"
36 #endif // GWP_ASAN_HOOKS
37 
38 extern "C" inline void EmptyCallback() {}
39 
40 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
41 // This function is not part of the NDK so it does not appear in any public
42 // header files. We only declare/use it when targeting the platform.
43 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
44                                                      size_t num_entries);
45 #endif
46 
47 namespace scudo {
48 
49 template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
50 class Allocator {
51 public:
52   using AllocatorConfig = BaseConfig<Config>;
53   using PrimaryT =
54       typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
55   using SecondaryT =
56       typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
57   using CacheT = typename PrimaryT::CacheT;
58   typedef Allocator<Config, PostInitCallback> ThisT;
59   typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;
60 
61   void callPostInitCallback() {
62     pthread_once(&PostInitNonce, PostInitCallback);
63   }
64 
65   struct QuarantineCallback {
66     explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
67         : Allocator(Instance), Cache(LocalCache) {}
68 
69     // Chunk recycling function, returns a quarantined chunk to the backend,
70     // first making sure it hasn't been tampered with.
71     void recycle(void *Ptr) {
72       Chunk::UnpackedHeader Header;
73       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
74       if (UNLIKELY(Header.State != Chunk::State::Quarantined))
75         reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
76 
77       Header.State = Chunk::State::Available;
78       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
79 
80       if (allocatorSupportsMemoryTagging<AllocatorConfig>())
81         Ptr = untagPointer(Ptr);
82       void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
83       Cache.deallocate(Header.ClassId, BlockBegin);
84     }
85 
86     // We take a shortcut when allocating a quarantine batch by working with the
87     // appropriate class ID instead of using Size. The compiler should optimize
88     // the class ID computation and work with the associated cache directly.
89     void *allocate(UNUSED uptr Size) {
90       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
91           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
92       void *Ptr = Cache.allocate(QuarantineClassId);
93       // Quarantine batch allocation failure is fatal.
94       if (UNLIKELY(!Ptr))
95         reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
96 
97       Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
98                                      Chunk::getHeaderSize());
99       Chunk::UnpackedHeader Header = {};
100       Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
101       Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
102       Header.State = Chunk::State::Allocated;
103       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
104 
105       // Reset tag to 0 as this chunk may have been previously used for a tagged
106       // user allocation.
107       if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
108               Allocator.Primary.Options.load())))
109         storeTags(reinterpret_cast<uptr>(Ptr),
110                   reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
111 
112       return Ptr;
113     }
114 
115     void deallocate(void *Ptr) {
116       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
117           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
118       Chunk::UnpackedHeader Header;
119       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
120 
121       if (UNLIKELY(Header.State != Chunk::State::Allocated))
122         reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
123       DCHECK_EQ(Header.ClassId, QuarantineClassId);
124       DCHECK_EQ(Header.Offset, 0);
125       DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
126 
127       Header.State = Chunk::State::Available;
128       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
129       Cache.deallocate(QuarantineClassId,
130                        reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
131                                                 Chunk::getHeaderSize()));
132     }
133 
134   private:
135     ThisT &Allocator;
136     CacheT &Cache;
137   };
138 
139   typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
140   typedef typename QuarantineT::CacheT QuarantineCacheT;
141 
142   void init() {
143     performSanityChecks();
144 
145     // Check if hardware CRC32 is supported in the binary and by the platform;
146     // if so, opt for the CRC32 hardware version of the checksum.
147     if (&computeHardwareCRC32 && hasHardwareCRC32())
148       HashAlgorithm = Checksum::HardwareCRC32;
149 
150     if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
151       Cookie = static_cast<u32>(getMonotonicTime() ^
152                                 (reinterpret_cast<uptr>(this) >> 4));
153 
154     initFlags();
155     reportUnrecognizedFlags();
156 
157     // Store some flags locally.
158     if (getFlags()->may_return_null)
159       Primary.Options.set(OptionBit::MayReturnNull);
160     if (getFlags()->zero_contents)
161       Primary.Options.setFillContentsMode(ZeroFill);
162     else if (getFlags()->pattern_fill_contents)
163       Primary.Options.setFillContentsMode(PatternOrZeroFill);
164     if (getFlags()->dealloc_type_mismatch)
165       Primary.Options.set(OptionBit::DeallocTypeMismatch);
166     if (getFlags()->delete_size_mismatch)
167       Primary.Options.set(OptionBit::DeleteSizeMismatch);
168     if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
169         systemSupportsMemoryTagging())
170       Primary.Options.set(OptionBit::UseMemoryTagging);
171 
172     QuarantineMaxChunkSize =
173         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
174 
175     Stats.init();
176     // TODO(chiahungduan): Given that we support setting the default value in
177     // the PrimaryConfig and CacheConfig, consider deprecating the use of the
178     // `release_to_os_interval_ms` flag.
179     const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
180     Primary.init(ReleaseToOsIntervalMs);
181     Secondary.init(&Stats, ReleaseToOsIntervalMs);
182     Quarantine.init(
183         static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
184         static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
185   }
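  // Illustrative usage sketch (not part of the original header), assuming a
  // configuration such as scudo::DefaultConfig from allocator_config.h; the C
  // wrappers follow roughly this pattern:
  //
  //   static scudo::Allocator<scudo::DefaultConfig> Instance;
  //   void *P = Instance.allocate(Size, scudo::Chunk::Origin::Malloc);
  //   Instance.deallocate(P, scudo::Chunk::Origin::Malloc);
  //
  // Note that init() does not need to be called directly: allocate() and
  // deallocate() invoke initThreadMaybe(), which performs the one-time
  // initialization through the TSD registry.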
186 
187   void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
188     AllocationRingBuffer *RB = getRingBuffer();
189     if (RB)
190       RB->Depot->enable();
191     RingBufferInitLock.unlock();
192   }
193 
194   void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
195     RingBufferInitLock.lock();
196     AllocationRingBuffer *RB = getRingBuffer();
197     if (RB)
198       RB->Depot->disable();
199   }
200 
201   // Initialize the embedded GWP-ASan instance. Requires the main allocator to
202   // be functional, best called from PostInitCallback.
203   void initGwpAsan() {
204 #ifdef GWP_ASAN_HOOKS
205     gwp_asan::options::Options Opt;
206     Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
207     Opt.MaxSimultaneousAllocations =
208         getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
209     Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
210     Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
211     Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
212     // Embedded GWP-ASan is locked through the Scudo atfork handler (via
213     // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
214     // handler.
215     Opt.InstallForkHandlers = false;
216     Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
217     GuardedAlloc.init(Opt);
218 
219     if (Opt.InstallSignalHandlers)
220       gwp_asan::segv_handler::installSignalHandlers(
221           &GuardedAlloc, Printf,
222           gwp_asan::backtrace::getPrintBacktraceFunction(),
223           gwp_asan::backtrace::getSegvBacktraceFunction(),
224           Opt.Recoverable);
225 
226     GuardedAllocSlotSize =
227         GuardedAlloc.getAllocatorState()->maximumAllocationSize();
228     Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
229                             GuardedAllocSlotSize);
230 #endif // GWP_ASAN_HOOKS
231   }
232 
233 #ifdef GWP_ASAN_HOOKS
234   const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
235     return GuardedAlloc.getMetadataRegion();
236   }
237 
238   const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
239     return GuardedAlloc.getAllocatorState();
240   }
241 #endif // GWP_ASAN_HOOKS
242 
243   ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
244     TSDRegistry.initThreadMaybe(this, MinimalInit);
245   }
246 
247   void unmapTestOnly() {
248     unmapRingBuffer();
249     TSDRegistry.unmapTestOnly(this);
250     Primary.unmapTestOnly();
251     Secondary.unmapTestOnly();
252 #ifdef GWP_ASAN_HOOKS
253     if (getFlags()->GWP_ASAN_InstallSignalHandlers)
254       gwp_asan::segv_handler::uninstallSignalHandlers();
255     GuardedAlloc.uninitTestOnly();
256 #endif // GWP_ASAN_HOOKS
257   }
258 
259   TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
260   QuarantineT *getQuarantine() { return &Quarantine; }
261 
262   // The Cache must be provided zero-initialized.
263   void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
264 
265   // Release the resources used by a TSD, which involves:
266   // - draining the local quarantine cache to the global quarantine;
267   // - releasing the cached pointers back to the Primary;
268   // - unlinking the local stats from the global ones (destroying the cache does
269   //   the last two items).
270   void commitBack(TSD<ThisT> *TSD) {
271     TSD->assertLocked(/*BypassCheck=*/true);
272     Quarantine.drain(&TSD->getQuarantineCache(),
273                      QuarantineCallback(*this, TSD->getCache()));
274     TSD->getCache().destroy(&Stats);
275   }
276 
277   void drainCache(TSD<ThisT> *TSD) {
278     TSD->assertLocked(/*BypassCheck=*/true);
279     Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
280                                QuarantineCallback(*this, TSD->getCache()));
281     TSD->getCache().drain();
282   }
283   void drainCaches() { TSDRegistry.drainCaches(this); }
284 
285   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
286     if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
287       return Ptr;
288     auto UntaggedPtr = untagPointer(Ptr);
289     if (UntaggedPtr != Ptr)
290       return UntaggedPtr;
291     // Secondary, or pointer allocated while memory tagging is unsupported or
292     // disabled. The tag mismatch is okay in the latter case because tags will
293     // not be checked.
294     return addHeaderTag(Ptr);
295   }
296 
297   ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
298     if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
299       return Ptr;
300     return addFixedTag(Ptr, 2);
301   }
302 
303   ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
304     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
305   }
306 
307   NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
308 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
309     // Discard collectStackTrace() frame and allocator function frame.
310     constexpr uptr DiscardFrames = 2;
311     uptr Stack[MaxTraceSize + DiscardFrames];
312     uptr Size =
313         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
314     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
315     return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
316 #else
317     return 0;
318 #endif
319   }
320 
321   uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
322                                          uptr ClassId) {
323     if (!Options.get(OptionBit::UseOddEvenTags))
324       return 0;
325 
326     // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
327     // even, and vice versa. Blocks are laid out Size bytes apart, and adding
328     // Size to Ptr will flip the least significant set bit of Size in Ptr, so
329     // that bit will have the pattern 010101... for consecutive blocks, which we
330     // can use to determine which tag mask to use.
331     return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
332   }
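  // Worked example (illustrative): for a size class whose block size is a
  // multiple of 64 but not of 128, getSizeLSBByClassId() returns 6, so bit 6
  // of Ptr alternates between consecutive blocks, and the returned exclude
  // mask alternates between 0x5555 (even tags excluded, the chunk gets an odd
  // tag) and 0xAAAA (odd tags excluded, the chunk gets an even tag).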
333 
334   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
335                           uptr Alignment = MinAlignment,
336                           bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
337     initThreadMaybe();
338 
339     const Options Options = Primary.Options.load();
340     if (UNLIKELY(Alignment > MaxAlignment)) {
341       if (Options.get(OptionBit::MayReturnNull))
342         return nullptr;
343       reportAlignmentTooBig(Alignment, MaxAlignment);
344     }
345     if (Alignment < MinAlignment)
346       Alignment = MinAlignment;
347 
348 #ifdef GWP_ASAN_HOOKS
349     if (UNLIKELY(GuardedAlloc.shouldSample())) {
350       if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
351         Stats.lock();
352         Stats.add(StatAllocated, GuardedAllocSlotSize);
353         Stats.sub(StatFree, GuardedAllocSlotSize);
354         Stats.unlock();
355         return Ptr;
356       }
357     }
358 #endif // GWP_ASAN_HOOKS
359 
360     const FillContentsMode FillContents = ZeroContents ? ZeroFill
361                                           : TSDRegistry.getDisableMemInit()
362                                               ? NoFill
363                                               : Options.getFillContentsMode();
364 
365     // If the requested size happens to be 0 (more common than you might think),
366     // allocate MinAlignment bytes on top of the header. Then add the extra
367     // bytes required to fulfill the alignment requirements: we allocate enough
368     // to be sure that there will be an address in the block that will satisfy
369     // the alignment.
370     const uptr NeededSize =
371         roundUp(Size, MinAlignment) +
372         ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
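    // Illustrative arithmetic, assuming a 64-bit platform where MinAlignment
    // is 16 and the header occupies 16 bytes: Size == 20 with Alignment == 64
    // gives NeededSize == roundUp(20, 16) + 64 == 96, enough to guarantee a
    // 64-byte-aligned user address inside the block with room for the header
    // in front of it and the 20 requested bytes after it.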
373 
374     // Takes care of extravagantly large sizes as well as integer overflows.
375     static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
376     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
377       if (Options.get(OptionBit::MayReturnNull))
378         return nullptr;
379       reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
380     }
381     DCHECK_LE(Size, NeededSize);
382 
383     void *Block = nullptr;
384     uptr ClassId = 0;
385     uptr SecondaryBlockEnd = 0;
386     if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
387       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
388       DCHECK_NE(ClassId, 0U);
389       typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
390       Block = TSD->getCache().allocate(ClassId);
391       // If the allocation failed, retry in each successively larger class until
92       // it fits. If it fails to fit in the largest class, fall back to the
393       // Secondary.
394       if (UNLIKELY(!Block)) {
395         while (ClassId < SizeClassMap::LargestClassId && !Block)
396           Block = TSD->getCache().allocate(++ClassId);
397         if (!Block)
398           ClassId = 0;
399       }
400     }
401     if (UNLIKELY(ClassId == 0)) {
402       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
403                                  FillContents);
404     }
405 
406     if (UNLIKELY(!Block)) {
407       if (Options.get(OptionBit::MayReturnNull))
408         return nullptr;
409       printStats();
410       reportOutOfMemory(NeededSize);
411     }
412 
413     const uptr UserPtr = roundUp(
414         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
415     const uptr SizeOrUnusedBytes =
416         ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
417 
418     if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
419       return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
420                        FillContents);
421     }
422 
423     return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
424                                       SizeOrUnusedBytes, FillContents);
425   }
426 
427   NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
428                            UNUSED uptr Alignment = MinAlignment) {
429     if (UNLIKELY(!Ptr))
430       return;
431 
432     // For a deallocation, we only ensure minimal initialization, meaning thread
433     // local data will be left uninitialized for now (when using ELF TLS). The
434     // fallback cache will be used instead. This is a workaround for a situation
435     // where the only heap operation performed in a thread would be a free past
436     // the TLS destructors, ending up in initialized thread specific data never
437     // being destroyed properly. Any other heap operation will do a full init.
438     initThreadMaybe(/*MinimalInit=*/true);
439 
440 #ifdef GWP_ASAN_HOOKS
441     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
442       GuardedAlloc.deallocate(Ptr);
443       Stats.lock();
444       Stats.add(StatFree, GuardedAllocSlotSize);
445       Stats.sub(StatAllocated, GuardedAllocSlotSize);
446       Stats.unlock();
447       return;
448     }
449 #endif // GWP_ASAN_HOOKS
450 
451     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
452       reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
453 
454     void *TaggedPtr = Ptr;
455     Ptr = getHeaderTaggedPointer(Ptr);
456 
457     Chunk::UnpackedHeader Header;
458     Chunk::loadHeader(Cookie, Ptr, &Header);
459 
460     if (UNLIKELY(Header.State != Chunk::State::Allocated))
461       reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
462 
463     const Options Options = Primary.Options.load();
464     if (Options.get(OptionBit::DeallocTypeMismatch)) {
465       if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
466         // With the exception of memalign'd chunks, which can still be free'd.
467         if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
468             Origin != Chunk::Origin::Malloc)
469           reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
470                                     Header.OriginOrWasZeroed, Origin);
471       }
472     }
473 
474     const uptr Size = getSize(Ptr, &Header);
475     if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
476       if (UNLIKELY(DeleteSize != Size))
477         reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
478     }
479 
480     quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
481   }
482 
483   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
484     initThreadMaybe();
485 
486     const Options Options = Primary.Options.load();
487     if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
488       if (Options.get(OptionBit::MayReturnNull))
489         return nullptr;
490       reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
491     }
492 
493     // The following cases are handled by the C wrappers.
494     DCHECK_NE(OldPtr, nullptr);
495     DCHECK_NE(NewSize, 0);
496 
497 #ifdef GWP_ASAN_HOOKS
498     if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
499       uptr OldSize = GuardedAlloc.getSize(OldPtr);
500       void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
501       if (NewPtr)
502         memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
503       GuardedAlloc.deallocate(OldPtr);
504       Stats.lock();
505       Stats.add(StatFree, GuardedAllocSlotSize);
506       Stats.sub(StatAllocated, GuardedAllocSlotSize);
507       Stats.unlock();
508       return NewPtr;
509     }
510 #endif // GWP_ASAN_HOOKS
511 
512     void *OldTaggedPtr = OldPtr;
513     OldPtr = getHeaderTaggedPointer(OldPtr);
514 
515     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
516       reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
517 
518     Chunk::UnpackedHeader Header;
519     Chunk::loadHeader(Cookie, OldPtr, &Header);
520 
521     if (UNLIKELY(Header.State != Chunk::State::Allocated))
522       reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
523 
524     // Pointer has to be allocated with a malloc-type function. Some
525     // applications think that it is OK to realloc a memalign'ed pointer, which
526     // will trigger this check. It really isn't.
527     if (Options.get(OptionBit::DeallocTypeMismatch)) {
528       if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
529         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
530                                   Header.OriginOrWasZeroed,
531                                   Chunk::Origin::Malloc);
532     }
533 
534     void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
535     uptr BlockEnd;
536     uptr OldSize;
537     const uptr ClassId = Header.ClassId;
538     if (LIKELY(ClassId)) {
539       BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
540                  SizeClassMap::getSizeByClassId(ClassId);
541       OldSize = Header.SizeOrUnusedBytes;
542     } else {
543       BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
544       OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
545                             Header.SizeOrUnusedBytes);
546     }
547     // If the new chunk still fits in the previously allocated block (with a
548     // reasonable delta), we just keep the old block, and update the chunk
549     // header to reflect the size change.
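    // For example (illustrative): shrinking a chunk by less than a page, or
    // growing it within the block that already backs it, only updates the
    // header (and, with memory tagging, the tagged region); no new block is
    // allocated and no user data is copied.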
550     if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
551       if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
552         Header.SizeOrUnusedBytes =
553             (ClassId ? NewSize
554                      : BlockEnd -
555                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
556             Chunk::SizeOrUnusedBytesMask;
557         Chunk::storeHeader(Cookie, OldPtr, &Header);
558         if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
559           if (ClassId) {
560             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
561                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
562                               NewSize, untagPointer(BlockEnd));
563             storePrimaryAllocationStackMaybe(Options, OldPtr);
564           } else {
565             storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
566           }
567         }
568         return OldTaggedPtr;
569       }
570     }
571 
572     // Otherwise we allocate a new one, and deallocate the old one. Some
573     // allocators will allocate an even larger chunk (by a fixed factor) to
574     // allow for potential further in-place realloc. The gains of such a trick
575     // are currently unclear.
576     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
577     if (LIKELY(NewPtr)) {
578       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
579       quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
580     }
581     return NewPtr;
582   }
583 
584   // TODO(kostyak): disable() is currently best-effort. There are some small
585   //                windows of time when an allocation could still succeed after
586   //                this function finishes. We will revisit that later.
587   void disable() NO_THREAD_SAFETY_ANALYSIS {
588     initThreadMaybe();
589 #ifdef GWP_ASAN_HOOKS
590     GuardedAlloc.disable();
591 #endif
592     TSDRegistry.disable();
593     Stats.disable();
594     Quarantine.disable();
595     Primary.disable();
596     Secondary.disable();
597     disableRingBuffer();
598   }
599 
600   void enable() NO_THREAD_SAFETY_ANALYSIS {
601     initThreadMaybe();
602     enableRingBuffer();
603     Secondary.enable();
604     Primary.enable();
605     Quarantine.enable();
606     Stats.enable();
607     TSDRegistry.enable();
608 #ifdef GWP_ASAN_HOOKS
609     GuardedAlloc.enable();
610 #endif
611   }
612 
613   // The function returns the number of bytes required to store the statistics,
614   // which might be larger than the amount of bytes provided. Note that the
615   // statistics buffer is not necessarily constant between calls to this
616   // function. This can be called with a null buffer or zero size for buffer
617   // sizing purposes.
618   uptr getStats(char *Buffer, uptr Size) {
619     ScopedString Str;
620     const uptr Length = getStats(&Str) + 1;
621     if (Length < Size)
622       Size = Length;
623     if (Buffer && Size) {
624       memcpy(Buffer, Str.data(), Size);
625       Buffer[Size - 1] = '\0';
626     }
627     return Length;
628   }
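  // Illustrative usage sketch (not part of the original header): size the
  // buffer with a first call, then fetch the statistics with a second one.
  //
  //   const scudo::uptr Needed = Instance.getStats(nullptr, 0);
  //   std::vector<char> Buf(Needed);
  //   Instance.getStats(Buf.data(), Buf.size());
  //
  // If the statistics grew between the two calls, the output is truncated
  // (still NUL-terminated) and the larger required length is returned.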
629 
630   void printStats() {
631     ScopedString Str;
632     getStats(&Str);
633     Str.output();
634   }
635 
636   void printFragmentationInfo() {
637     ScopedString Str;
638     Primary.getFragmentationInfo(&Str);
639     // Secondary allocator dumps the fragmentation data in getStats().
640     Str.output();
641   }
642 
643   void releaseToOS(ReleaseToOS ReleaseType) {
644     initThreadMaybe();
645     if (ReleaseType == ReleaseToOS::ForceAll)
646       drainCaches();
647     Primary.releaseToOS(ReleaseType);
648     Secondary.releaseToOS();
649   }
650 
651   // Iterate over all chunks and call a callback for all busy chunks located
652   // within the provided memory range. Said callback must not use this allocator
653   // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
654   void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
655                          void *Arg) {
656     initThreadMaybe();
657     if (archSupportsMemoryTagging())
658       Base = untagPointer(Base);
659     const uptr From = Base;
660     const uptr To = Base + Size;
661     bool MayHaveTaggedPrimary =
662         allocatorSupportsMemoryTagging<AllocatorConfig>() &&
663         systemSupportsMemoryTagging();
664     auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
665                    Arg](uptr Block) {
666       if (Block < From || Block >= To)
667         return;
668       uptr Chunk;
669       Chunk::UnpackedHeader Header;
670       if (MayHaveTaggedPrimary) {
671         // A chunk header can either have a zero tag (tagged primary) or the
672         // header tag (secondary, or untagged primary). We don't know which so
673         // try both.
674         ScopedDisableMemoryTagChecks x;
675         if (!getChunkFromBlock(Block, &Chunk, &Header) &&
676             !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
677           return;
678       } else {
679         if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
680           return;
681       }
682       if (Header.State == Chunk::State::Allocated) {
683         uptr TaggedChunk = Chunk;
684         if (allocatorSupportsMemoryTagging<AllocatorConfig>())
685           TaggedChunk = untagPointer(TaggedChunk);
686         if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
687           TaggedChunk = loadTag(Chunk);
688         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
689                  Arg);
690       }
691     };
692     Primary.iterateOverBlocks(Lambda);
693     Secondary.iterateOverBlocks(Lambda);
694 #ifdef GWP_ASAN_HOOKS
695     GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
696 #endif
697   }
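  // Illustrative usage sketch (not part of the original header): count the
  // busy chunks in an address range, in the spirit of malloc_iterate(). The
  // callback only bumps a counter, so it does not re-enter the allocator.
  //
  //   scudo::uptr BusyChunks = 0;
  //   Instance.iterateOverChunks(
  //       Base, Size,
  //       [](uintptr_t, size_t, void *Arg) {
  //         ++*reinterpret_cast<scudo::uptr *>(Arg);
  //       },
  //       &BusyChunks);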
698 
699   bool canReturnNull() {
700     initThreadMaybe();
701     return Primary.Options.load().get(OptionBit::MayReturnNull);
702   }
703 
704   bool setOption(Option O, sptr Value) {
705     initThreadMaybe();
706     if (O == Option::MemtagTuning) {
707       // Enabling odd/even tags involves a tradeoff between use-after-free
708       // detection and buffer overflow detection. Odd/even tags make it more
709       // likely for buffer overflows to be detected by increasing the size of
710       // the guaranteed "red zone" around the allocation, but on the other hand
711       // use-after-free is less likely to be detected because the tag space for
712       // any particular chunk is cut in half. Therefore we use this tuning
713       // setting to control whether odd/even tags are enabled.
714       if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
715         Primary.Options.set(OptionBit::UseOddEvenTags);
716       else if (Value == M_MEMTAG_TUNING_UAF)
717         Primary.Options.clear(OptionBit::UseOddEvenTags);
718       return true;
719     } else {
720       // We leave it to the various sub-components to decide whether or not they
721       // want to handle the option, but we do not want to short-circuit
722       // execution if one of the setOption calls were to return false.
723       const bool PrimaryResult = Primary.setOption(O, Value);
724       const bool SecondaryResult = Secondary.setOption(O, Value);
725       const bool RegistryResult = TSDRegistry.setOption(O, Value);
726       return PrimaryResult && SecondaryResult && RegistryResult;
727     }
728     return false;
729   }
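  // Illustrative usage sketch (not part of the original header), assuming the
  // M_MEMTAG_TUNING_* constants referenced above are visible to the caller:
  // bias tag selection towards buffer-overflow detection rather than
  // use-after-free detection.
  //
  //   Instance.setOption(scudo::Option::MemtagTuning,
  //                      M_MEMTAG_TUNING_BUFFER_OVERFLOW);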
730 
731   // Return the usable size for a given chunk. Technically we lie, as we just
732   // report the actual size of a chunk. This is done to counteract code actively
733   // writing past the end of a chunk (like sqlite3) when the usable size allows
734   // for it, which then forces realloc to copy the usable size of a chunk as
735   // opposed to its actual size.
736   uptr getUsableSize(const void *Ptr) {
737     if (UNLIKELY(!Ptr))
738       return 0;
739 
740     return getAllocSize(Ptr);
741   }
742 
743   uptr getAllocSize(const void *Ptr) {
744     initThreadMaybe();
745 
746 #ifdef GWP_ASAN_HOOKS
747     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
748       return GuardedAlloc.getSize(Ptr);
749 #endif // GWP_ASAN_HOOKS
750 
751     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
752     Chunk::UnpackedHeader Header;
753     Chunk::loadHeader(Cookie, Ptr, &Header);
754 
755     // Getting the alloc size of a chunk only makes sense if it's allocated.
756     if (UNLIKELY(Header.State != Chunk::State::Allocated))
757       reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
758 
759     return getSize(Ptr, &Header);
760   }
761 
762   void getStats(StatCounters S) {
763     initThreadMaybe();
764     Stats.get(S);
765   }
766 
767   // Returns true if the pointer provided was allocated by the current
768   // allocator instance, which is compliant with tcmalloc's ownership concept.
769   // A corrupted chunk will not be reported as owned, which is WAI.
770   bool isOwned(const void *Ptr) {
771     initThreadMaybe();
772 #ifdef GWP_ASAN_HOOKS
773     if (GuardedAlloc.pointerIsMine(Ptr))
774       return true;
775 #endif // GWP_ASAN_HOOKS
776     if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
777       return false;
778     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
779     Chunk::UnpackedHeader Header;
780     return Chunk::isValid(Cookie, Ptr, &Header) &&
781            Header.State == Chunk::State::Allocated;
782   }
783 
784   bool useMemoryTaggingTestOnly() const {
785     return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
786   }
787   void disableMemoryTagging() {
788     // If we haven't been initialized yet, we need to initialize now in order to
789     // prevent a future call to initThreadMaybe() from enabling memory tagging
790     // based on feature detection. But don't call initThreadMaybe() because it
791     // may end up calling the allocator (via pthread_atfork, via the post-init
792     // callback), which may cause mappings to be created with memory tagging
793     // enabled.
794     TSDRegistry.initOnceMaybe(this);
795     if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
796       Secondary.disableMemoryTagging();
797       Primary.Options.clear(OptionBit::UseMemoryTagging);
798     }
799   }
800 
801   void setTrackAllocationStacks(bool Track) {
802     initThreadMaybe();
803     if (getFlags()->allocation_ring_buffer_size <= 0) {
804       DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
805       return;
806     }
807 
808     if (Track) {
809       initRingBufferMaybe();
810       Primary.Options.set(OptionBit::TrackAllocationStacks);
811     } else
812       Primary.Options.clear(OptionBit::TrackAllocationStacks);
813   }
814 
815   void setFillContents(FillContentsMode FillContents) {
816     initThreadMaybe();
817     Primary.Options.setFillContentsMode(FillContents);
818   }
819 
820   void setAddLargeAllocationSlack(bool AddSlack) {
821     initThreadMaybe();
822     if (AddSlack)
823       Primary.Options.set(OptionBit::AddLargeAllocationSlack);
824     else
825       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
826   }
827 
828   const char *getStackDepotAddress() {
829     initThreadMaybe();
830     AllocationRingBuffer *RB = getRingBuffer();
831     return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
832   }
833 
834   uptr getStackDepotSize() {
835     initThreadMaybe();
836     AllocationRingBuffer *RB = getRingBuffer();
837     return RB ? RB->StackDepotSize : 0;
838   }
839 
840   const char *getRegionInfoArrayAddress() const {
841     return Primary.getRegionInfoArrayAddress();
842   }
843 
844   static uptr getRegionInfoArraySize() {
845     return PrimaryT::getRegionInfoArraySize();
846   }
847 
848   const char *getRingBufferAddress() {
849     initThreadMaybe();
850     return reinterpret_cast<char *>(getRingBuffer());
851   }
852 
853   uptr getRingBufferSize() {
854     initThreadMaybe();
855     AllocationRingBuffer *RB = getRingBuffer();
856     return RB && RB->RingBufferElements
857                ? ringBufferSizeInBytes(RB->RingBufferElements)
858                : 0;
859   }
860 
861   static const uptr MaxTraceSize = 64;
862 
863   static void collectTraceMaybe(const StackDepot *Depot,
864                                 uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
865     uptr RingPos, Size;
866     if (!Depot->find(Hash, &RingPos, &Size))
867       return;
868     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
869       Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
870   }
871 
872   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
873                            uintptr_t FaultAddr, const char *DepotPtr,
874                            size_t DepotSize, const char *RegionInfoPtr,
875                            const char *RingBufferPtr, size_t RingBufferSize,
876                            const char *Memory, const char *MemoryTags,
877                            uintptr_t MemoryAddr, size_t MemorySize) {
878     // N.B. we need to support corrupted data in any of the buffers here. We get
879     // this information from an external process (the crashing process) that
880     // should not be able to crash the crash dumper (crash_dump on Android).
881     // See also the get_error_info_fuzzer.
882     *ErrorInfo = {};
883     if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
884         MemoryAddr + MemorySize < MemoryAddr)
885       return;
886 
887     const StackDepot *Depot = nullptr;
888     if (DepotPtr) {
889       // Check for a corrupted StackDepot. First we need to check whether we can
890       // read the metadata, then whether the metadata matches the size.
891       if (DepotSize < sizeof(*Depot))
892         return;
893       Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
894       if (!Depot->isValid(DepotSize))
895         return;
896     }
897 
898     size_t NextErrorReport = 0;
899 
900     // Check for OOB in the current block and the two surrounding blocks. Beyond
901     // that, UAF is more likely.
902     if (extractTag(FaultAddr) != 0)
903       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
904                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
905                          MemorySize, 0, 2);
906 
907     // Check the ring buffer. For primary allocations this will only find UAF;
908     // for secondary allocations we can find either UAF or OOB.
909     getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
910                            RingBufferPtr, RingBufferSize);
911 
912     // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
913     // Beyond that we are likely to hit false positives.
914     if (extractTag(FaultAddr) != 0)
915       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
916                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
917                          MemorySize, 2, 16);
918   }
919 
920 private:
921   typedef typename PrimaryT::SizeClassMap SizeClassMap;
922 
923   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
924   static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
925   static const uptr MinAlignment = 1UL << MinAlignmentLog;
926   static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
927   static const uptr MaxAllowedMallocSize =
928       FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
929 
930   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
931                 "Minimal alignment must at least cover a chunk header.");
932   static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
933                     MinAlignment >= archMemoryTagGranuleSize(),
934                 "");
935 
936   static const u32 BlockMarker = 0x44554353U;
937 
938   // These are indexes into an "array" of 32-bit values that store information
939   // inline with a chunk that is relevant to diagnosing memory tag faults, where
940   // 0 corresponds to the address of the user memory. This means that only
941   // negative indexes may be used. The smallest index that may be used is -2,
942   // which corresponds to 8 bytes before the user memory, because the chunk
943   // header size is 8 bytes and in allocators that support memory tagging the
944   // minimum alignment is at least the tag granule size (16 on aarch64).
945   static const sptr MemTagAllocationTraceIndex = -2;
946   static const sptr MemTagAllocationTidIndex = -1;
947 
948   u32 Cookie = 0;
949   u32 QuarantineMaxChunkSize = 0;
950 
951   GlobalStats Stats;
952   PrimaryT Primary;
953   SecondaryT Secondary;
954   QuarantineT Quarantine;
955   TSDRegistryT TSDRegistry;
956   pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
957 
958 #ifdef GWP_ASAN_HOOKS
959   gwp_asan::GuardedPoolAllocator GuardedAlloc;
960   uptr GuardedAllocSlotSize = 0;
961 #endif // GWP_ASAN_HOOKS
962 
963   struct AllocationRingBuffer {
964     struct Entry {
965       atomic_uptr Ptr;
966       atomic_uptr AllocationSize;
967       atomic_u32 AllocationTrace;
968       atomic_u32 AllocationTid;
969       atomic_u32 DeallocationTrace;
970       atomic_u32 DeallocationTid;
971     };
972     StackDepot *Depot = nullptr;
973     uptr StackDepotSize = 0;
974     MemMapT RawRingBufferMap;
975     MemMapT RawStackDepotMap;
976     u32 RingBufferElements = 0;
977     atomic_uptr Pos;
978     // An array of Size (at least one) elements of type Entry immediately
979     // follows this struct.
980   };
981   static_assert(sizeof(AllocationRingBuffer) %
982                         alignof(typename AllocationRingBuffer::Entry) ==
983                     0,
984                 "invalid alignment");
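  // Illustrative sketch (not part of the original header): because the Entry
  // array is laid out directly after the struct, element I can be addressed
  // roughly as
  //
  //   auto *Entries = reinterpret_cast<AllocationRingBuffer::Entry *>(
  //       reinterpret_cast<char *>(RB) + sizeof(AllocationRingBuffer));
  //   auto &E = Entries[I % RB->RingBufferElements];
  //
  // which is why the static_assert above requires sizeof(AllocationRingBuffer)
  // to be a multiple of alignof(Entry).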
985 
986   // Lock to initialize the RingBuffer
987   HybridMutex RingBufferInitLock;
988 
989   // Pointer to memory mapped area starting with AllocationRingBuffer struct,
990   // and immediately followed by Size elements of type Entry.
991   atomic_uptr RingBufferAddress = {};
992 
993   AllocationRingBuffer *getRingBuffer() {
994     return reinterpret_cast<AllocationRingBuffer *>(
995         atomic_load(&RingBufferAddress, memory_order_acquire));
996   }
997 
998   // The following might get optimized out by the compiler.
999   NOINLINE void performSanityChecks() {
1000     // Verify that the header offset field can hold the maximum offset. In the
1001     // case of the Secondary allocator, it takes care of alignment and the
1002     // offset will always be small. In the case of the Primary, the worst case
1003     // scenario happens in the last size class, when the backend allocation
1004     // would already be aligned on the requested alignment, which would happen
1005     // to be the maximum alignment that would fit in that size class. As a
1006     // result, the maximum offset will be at most the maximum alignment for the
1007     // last size class minus the header size, in multiples of MinAlignment.
1008     Chunk::UnpackedHeader Header = {};
1009     const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1010                                          SizeClassMap::MaxSize - MinAlignment);
1011     const uptr MaxOffset =
1012         (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1013     Header.Offset = MaxOffset & Chunk::OffsetMask;
1014     if (UNLIKELY(Header.Offset != MaxOffset))
1015       reportSanityCheckError("offset");
1016 
1017     // Verify that we can fit the maximum size or amount of unused bytes in the
1018     // header. Given that the Secondary fits the allocation to a page, the worst
1019     // case scenario happens in the Primary. It will depend on the second to
1020     // last and last class sizes, as well as the dynamic base for the Primary.
1021     // The following is an over-approximation that works for our needs.
1022     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1023     Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1024     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1025       reportSanityCheckError("size (or unused bytes)");
1026 
1027     const uptr LargestClassId = SizeClassMap::LargestClassId;
1028     Header.ClassId = LargestClassId;
1029     if (UNLIKELY(Header.ClassId != LargestClassId))
1030       reportSanityCheckError("class ID");
1031   }
1032 
1033   static inline void *getBlockBegin(const void *Ptr,
1034                                     Chunk::UnpackedHeader *Header) {
1035     return reinterpret_cast<void *>(
1036         reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1037         (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1038   }
1039 
1040   // Return the size of a chunk as requested during its allocation.
1041   inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1042     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1043     if (LIKELY(Header->ClassId))
1044       return SizeOrUnusedBytes;
1045     if (allocatorSupportsMemoryTagging<AllocatorConfig>())
1046       Ptr = untagPointer(const_cast<void *>(Ptr));
1047     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1048            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1049   }
1050 
1051   ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
1052                                 void *Block, const uptr UserPtr,
1053                                 const uptr SizeOrUnusedBytes,
1054                                 const FillContentsMode FillContents) {
1055     // Compute the default pointer before adding the header tag
1056     const uptr DefaultAlignedPtr =
1057         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
1058 
1059     Block = addHeaderTag(Block);
1060     // Only do the content fill when the block comes from the primary allocator,
1061     // because the secondary allocator has already filled the content.
1062     if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
1063       // This condition is not necessarily unlikely, but since memset is
1064       // costly, we might as well mark it as such.
1065       memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
1066              PrimaryT::getSizeByClassId(ClassId));
1067     }
1068 
1069     Chunk::UnpackedHeader Header = {};
1070 
1071     if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
1072       const uptr Offset = UserPtr - DefaultAlignedPtr;
1073       DCHECK_GE(Offset, 2 * sizeof(u32));
1074       // The BlockMarker has no security purpose, but is specifically meant for
1075       // the chunk iteration function that can be used in debugging situations.
1076       // It is the only situation where we have to locate the start of a chunk
1077       // based on its block address.
1078       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
1079       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
1080       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
1081     }
1082 
1083     Header.ClassId = ClassId & Chunk::ClassIdMask;
1084     Header.State = Chunk::State::Allocated;
1085     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
1086     Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
1087     Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
1088                        &Header);
1089 
1090     return reinterpret_cast<void *>(UserPtr);
1091   }
1092 
1093   NOINLINE void *
1094   initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
1095                              void *Block, const uptr UserPtr, const uptr Size,
1096                              const uptr SizeOrUnusedBytes,
1097                              const FillContentsMode FillContents) {
1098     const Options Options = Primary.Options.load();
1099     DCHECK(useMemoryTagging<AllocatorConfig>(Options));
1100 
1101     // Compute the default pointer before adding the header tag
1102     const uptr DefaultAlignedPtr =
1103         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
1104 
1105     void *Ptr = reinterpret_cast<void *>(UserPtr);
1106     void *TaggedPtr = Ptr;
1107 
1108     if (LIKELY(ClassId)) {
1109       // Init the primary chunk.
1110       //
1111       // We only need to zero or tag the contents for Primary backed
1112       // allocations. We only set tags for primary allocations in order to avoid
1113       // faulting potentially large numbers of pages for large secondary
1114       // allocations. We assume that guard pages are enough to protect these
1115       // allocations.
1116       //
1117       // FIXME: When the kernel provides a way to set the background tag of a
1118       // mapping, we should be able to tag secondary allocations as well.
1119       //
1120       // When memory tagging is enabled, zeroing the contents is done as part of
1121       // setting the tag.
1122 
1123       Chunk::UnpackedHeader Header;
1124       const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
1125       const uptr BlockUptr = reinterpret_cast<uptr>(Block);
1126       const uptr BlockEnd = BlockUptr + BlockSize;
1127       // If possible, try to reuse the UAF tag that was set by deallocate().
1128       // For simplicity, only reuse tags if we have the same start address as
1129       // the previous allocation. This handles the majority of cases since
1130       // most allocations will not be more aligned than the minimum alignment.
1131       //
1132       // We need to handle situations involving reclaimed chunks, and retag
1133       // the reclaimed portions if necessary. In the case where the chunk is
1134       // fully reclaimed, the chunk's header will be zero, which will trigger
1135       // the code path for new mappings and invalid chunks that prepares the
1136       // chunk from scratch. There are three possibilities for partial
1137       // reclaiming:
1138       //
1139       // (1) Header was reclaimed, data was partially reclaimed.
1140       // (2) Header was not reclaimed, all data was reclaimed (e.g. because
1141       //     data started on a page boundary).
1142       // (3) Header was not reclaimed, data was partially reclaimed.
1143       //
1144       // Case (1) will be handled in the same way as for full reclaiming,
1145       // since the header will be zero.
1146       //
1147       // We can detect case (2) by loading the tag from the start
1148       // of the chunk. If it is zero, it means that either all data was
1149       // reclaimed (since we never use zero as the chunk tag), or that the
1150       // previous allocation was of size zero. Either way, we need to prepare
1151       // a new chunk from scratch.
1152       //
1153       // We can detect case (3) by moving to the next page (if covered by the
1154       // chunk) and loading the tag of its first granule. If it is zero, it
1155       // means that all following pages may need to be retagged. On the other
1156       // hand, if it is nonzero, we can assume that all following pages are
1157       // still tagged, according to the logic that if any of the pages
1158       // following the next page were reclaimed, the next page would have been
1159       // reclaimed as well.
1160       uptr TaggedUserPtr;
1161       uptr PrevUserPtr;
1162       if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
1163           PrevUserPtr == UserPtr &&
1164           (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
1165         uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
1166         const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
1167         if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
1168           PrevEnd = NextPage;
1169         TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
1170         resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
1171         if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
1172           // If an allocation needs to be zeroed (i.e. calloc) we can normally
1173           // avoid zeroing the memory now since we can rely on memory having
1174           // been zeroed on free, as this is normally done while setting the
1175           // UAF tag. But if tagging was disabled per-thread when the memory
1176           // was freed, it would not have been retagged and thus zeroed, and
1177           // therefore it needs to be zeroed now.
1178           memset(TaggedPtr, 0,
1179                  Min(Size, roundUp(PrevEnd - TaggedUserPtr,
1180                                    archMemoryTagGranuleSize())));
1181         } else if (Size) {
1182           // Clear any stack metadata that may have previously been stored in
1183           // the chunk data.
1184           memset(TaggedPtr, 0, archMemoryTagGranuleSize());
1185         }
1186       } else {
1187         const uptr OddEvenMask =
1188             computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
1189         TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
1190       }
1191       storePrimaryAllocationStackMaybe(Options, Ptr);
1192     } else {
1193       // Init the secondary chunk.
1194 
1195       Block = addHeaderTag(Block);
1196       Ptr = addHeaderTag(Ptr);
1197       storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
1198       storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
1199     }
1200 
1201     Chunk::UnpackedHeader Header = {};
1202 
1203     if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
1204       const uptr Offset = UserPtr - DefaultAlignedPtr;
1205       DCHECK_GE(Offset, 2 * sizeof(u32));
1206       // The BlockMarker has no security purpose, but is specifically meant for
1207       // the chunk iteration function that can be used in debugging situations.
1208       // It is the only situation where we have to locate the start of a chunk
1209       // based on its block address.
1210       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
1211       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
1212       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
1213     }
1214 
1215     Header.ClassId = ClassId & Chunk::ClassIdMask;
1216     Header.State = Chunk::State::Allocated;
1217     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
1218     Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
1219     Chunk::storeHeader(Cookie, Ptr, &Header);
1220 
1221     return TaggedPtr;
1222   }
1223 
1224   void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
1225                                    Chunk::UnpackedHeader *Header,
1226                                    uptr Size) NO_THREAD_SAFETY_ANALYSIS {
1227     void *Ptr = getHeaderTaggedPointer(TaggedPtr);
1228     // If the quarantine is disabled, or if the actual size of the chunk is 0 or
1229     // larger than the maximum allowed, we return the chunk directly to the backend.
1230     // This purposefully underflows for Size == 0.
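    // (With Size == 0, Size - 1 wraps around to the maximum uptr value, which
    // is always >= QuarantineMaxChunkSize, so zero-sized chunks bypass the
    // quarantine without needing a separate check.)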
1231     const bool BypassQuarantine = !Quarantine.getCacheSize() ||
1232                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
1233                                   !Header->ClassId;
    if (BypassQuarantine)
      Header->State = Chunk::State::Available;
    else
      Header->State = Chunk::State::Quarantined;

    void *BlockBegin;
    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
      Header->OriginOrWasZeroed = 0U;
      if (BypassQuarantine && allocatorSupportsMemoryTagging<AllocatorConfig>())
        Ptr = untagPointer(Ptr);
      BlockBegin = getBlockBegin(Ptr, Header);
    } else {
      Header->OriginOrWasZeroed =
          Header->ClassId && !TSDRegistry.getDisableMemInit();
      BlockBegin =
          retagBlock(Options, TaggedPtr, Ptr, Header, Size, BypassQuarantine);
    }

    Chunk::storeHeader(Cookie, Ptr, Header);

    if (BypassQuarantine) {
      const uptr ClassId = Header->ClassId;
      if (LIKELY(ClassId)) {
        bool CacheDrained;
        {
          typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
          CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
        }
        // When we have drained some blocks back to the Primary from the TSD,
        // we may have the chance to release some pages as well. Note that in
        // order not to block other threads from accessing the TSD, release the
        // TSD first and then try the page release.
        if (CacheDrained)
          Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
      } else {
        Secondary.deallocate(Options, BlockBegin);
      }
    } else {
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Quarantine.put(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
    }
  }

  NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
                            Chunk::UnpackedHeader *Header, const uptr Size,
                            bool BypassQuarantine) {
    DCHECK(useMemoryTagging<AllocatorConfig>(Options));

    const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
    storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
    if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
      uptr TaggedBegin, TaggedEnd;
      const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
          Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
          Header->ClassId);
      // Exclude the previous tag so that immediate use after free is
      // detected 100% of the time.
      setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
                   &TaggedEnd);
    }

    Ptr = untagPointer(Ptr);
    void *BlockBegin = getBlockBegin(Ptr, Header);
    if (BypassQuarantine && !Header->ClassId) {
      storeTags(reinterpret_cast<uptr>(BlockBegin),
                reinterpret_cast<uptr>(Ptr));
    }

    return BlockBegin;
  }

  bool getChunkFromBlock(uptr Block, uptr *Chunk,
                         Chunk::UnpackedHeader *Header) {
    *Chunk =
        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  }

  static uptr getChunkOffsetFromBlock(const char *Block) {
    u32 Offset = 0;
    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
      Offset = reinterpret_cast<const u32 *>(Block)[1];
    return Offset + Chunk::getHeaderSize();
  }
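
  // A worked sketch of the layout that getChunkFromBlock() and
  // getChunkOffsetFromBlock() rely on (the numbers below are illustrative
  // only, they are not taken from the code):
  //
  //   Block:                 [BlockMarker][Offset = 48][ ... padding ... ]
  //   Block + 48:            [chunk header]
  //   Block + 48 + header:   [user data]   <- *Chunk returned above
  //
  // allocate() only stores the marker and offset when extra alignment pushed
  // the user pointer past its default position; otherwise the marker is
  // absent, Offset stays 0, and the chunk immediately follows the header at
  // the block start, i.e. *Chunk == Block + Chunk::getHeaderSize().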

  // Set the tag of the granule past the end of the allocation to 0, to catch
  // linear overflows even if a previous larger allocation used the same block
  // and tag. Only do this if the granule past the end is in our block, because
  // this would otherwise lead to a SEGV if the allocation covers the entire
  // block and our block is at the end of a mapping. The tag of the next block's
  // header granule will be set to 0, so it will serve the purpose of catching
  // linear overflows in this case.
  //
  // For allocations of size 0 we do not end up storing the address tag to the
  // memory tag space, which getInlineErrorInfo() normally relies on to match
  // address tags against chunks. To allow matching in this case we store the
  // address tag in the first byte of the chunk.
  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
    DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
    uptr UntaggedEnd = untagPointer(End);
    if (UntaggedEnd != BlockEnd) {
      storeTag(UntaggedEnd);
      if (Size == 0)
        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
    }
  }
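
  // Illustrative example of the end marker (hypothetical addresses, 16-byte
  // MTE granules assumed): for a 24-byte allocation starting at a granule
  // boundary, the tagged region covers bytes [0, 32), so the granule at
  // byte 32 gets tag 0 and a linear overflow that runs past byte 31 with the
  // allocation's pointer tag faults. If the allocation ends exactly at
  // BlockEnd, nothing is stored and the zero tag on the next block's header
  // granule plays the same role.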

  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                           uptr BlockEnd) {
    // Prepare the granule before the chunk to store the chunk header by setting
    // its tag to 0. Normally its tag will already be 0, but in the case where a
    // chunk holding a low alignment allocation is reused for a higher alignment
    // allocation, the chunk may already have a non-zero tag from the previous
    // allocation.
    storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());

    uptr TaggedBegin, TaggedEnd;
    setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);

    storeEndMarker(TaggedEnd, Size, BlockEnd);
    return reinterpret_cast<void *>(TaggedBegin);
  }

  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                         uptr BlockEnd) {
    uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
    uptr RoundNewPtr;
    if (RoundOldPtr >= NewPtr) {
      // If the allocation is shrinking we just need to set the tag past the end
      // of the allocation to 0. See explanation in storeEndMarker() above.
      RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
    } else {
      // Set the memory tag of the region
      // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
      // to the pointer tag stored in OldPtr.
      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
    }
    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
  }
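
  // Resizing in practice (hypothetical numbers, 16-byte granules assumed): a
  // realloc that grows an in-place chunk from 20 to 70 bytes has
  // OldPtr == chunk + 20 and NewPtr == chunk + 70, so RoundOldPtr is
  // chunk + 32, storeTags() extends the pointer's tag over [chunk + 32,
  // chunk + 80) and the end marker lands at chunk + 80. Shrinking from 70 to
  // 20 bytes skips the retagging and simply drops the end marker at
  // chunk + 32, so a linear overflow past the new end runs into the
  // zero-tagged granule there.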

  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
    Ptr32[MemTagAllocationTidIndex] = getThreadID();
  }

  void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
                            u32 AllocationTrace, u32 AllocationTid,
                            uptr AllocationSize, u32 DeallocationTrace,
                            u32 DeallocationTid) {
    uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
    typename AllocationRingBuffer::Entry *Entry =
        getRingBufferEntry(RB, Pos % RB->RingBufferElements);

    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
    atomic_store_relaxed(&Entry->Ptr, 0);

    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
    atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
    atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
    atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
    atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
    __atomic_signal_fence(__ATOMIC_SEQ_CST);

    atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
  }
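
  // The reader side of this protocol lives in getRingBufferErrorInfo() below:
  // it loads Entry->Ptr first and skips the entry when it reads 0, so an entry
  // is only interpreted once the final store above has published it. A sketch
  // of that consumer-side check (simplified from the function below):
  //
  //   uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
  //   if (!EntryPtr)
  //     continue;  // entry is being rewritten, ignore it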

  void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                          uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    u32 Trace = collectStackTrace(RB->Depot);
    u32 Tid = getThreadID();

    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = Trace;
    Ptr32[MemTagAllocationTidIndex] = Tid;

    storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
  }

  void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                   u8 PrevTag, uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

    u32 DeallocationTrace = collectStackTrace(RB->Depot);
    u32 DeallocationTid = getThreadID();

    storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
                         AllocationTrace, AllocationTid, Size,
                         DeallocationTrace, DeallocationTid);
  }

  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)nullptr)->reports) /
      sizeof(((scudo_error_info *)nullptr)->reports[0]);

  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;

      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);

      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk, see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);

      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;

      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      uint8_t Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      if (Depot) {
        collectTraceMaybe(Depot, R->allocation_trace,
                          Data[MemTagAllocationTraceIndex]);
      }
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }

  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr,
                                     size_t RingBufferSize) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
    if (!RingBuffer || RingBufferElements == 0 || !Depot)
      return;
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

    for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
                           NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very likely
        // to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In this
        // case we are guaranteed a guard region of at least a page on either
        // side of the allocation (guard page on the right, guard page + tagged
        // region on the left), so ignore any faults outside of that range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;

        // For UAF the ring buffer will contain two entries, one for the
        // allocation and another for the deallocation. Don't report buffer
        // overflow/underflow using the allocation entry if we have already
        // collected a report from the deallocation entry.
        bool Found = false;
        for (uptr J = 0; J != NextErrorReport; ++J) {
          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
            Found = true;
            break;
          }
        }
        if (Found)
          continue;
      }

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;

      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }

  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    TSDRegistry.getStats(Str);
    return Str->length();
  }

  static typename AllocationRingBuffer::Entry *
  getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
    char *RBEntryStart =
        &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }
  static const typename AllocationRingBuffer::Entry *
  getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
    const char *RBEntryStart =
        &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }

  void initRingBufferMaybe() {
    ScopedLock L(RingBufferInitLock);
    if (getRingBuffer() != nullptr)
      return;

    int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
    if (ring_buffer_size <= 0)
      return;

    u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);

    // We store alloc and free stacks for each entry.
    constexpr u32 kStacksPerRingBufferEntry = 2;
    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
    static_assert(isPowerOfTwo(kMaxU32Pow2));
    // On Android we always have 3 frames at the bottom: __start_main,
    // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
    // Allocator::allocate. This leaves 10 frames for the user app. The next
    // smallest power of two (8) would only leave 2, which is clearly too
    // little.
    constexpr u32 kFramesPerStack = 16;
    static_assert(isPowerOfTwo(kFramesPerStack));

    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
      return;
    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
                                                     AllocationRingBufferSize));
    if (TabSize > UINT32_MAX / kFramesPerStack)
      return;
    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);

    uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
                          sizeof(atomic_u32) * TabSize;
    MemMapT DepotMap;
    DepotMap.map(
        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
        "scudo:stack_depot");
    auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
    Depot->init(RingSize, TabSize);

    MemMapT MemMap;
    MemMap.map(
        /*Addr=*/0U,
        roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
                getPageSizeCached()),
        "scudo:ring_buffer");
    auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
    RB->RawRingBufferMap = MemMap;
    RB->RingBufferElements = AllocationRingBufferSize;
    RB->Depot = Depot;
    RB->StackDepotSize = StackDepotSize;
    RB->RawStackDepotMap = DepotMap;

    atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
                 memory_order_release);
  }
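
  // Example of the sizing arithmetic above (the ring buffer size is
  // hypothetical, not a default taken from the code): with
  // allocation_ring_buffer_size == 32768, TabSize becomes
  // roundUpPowerOfTwo(2 * 32768) == 65536 and RingSize becomes
  // 65536 * 16 == 1048576, so the depot mapping is roughly sizeof(StackDepot)
  // plus 8 MiB of atomic_u64 ring plus 256 KiB of atomic_u32 hash table,
  // rounded up to a page boundary.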

  void unmapRingBuffer() {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB == nullptr)
      return;
    // N.B. the RawStackDepotMap field lives on the pages owned by
    // RawRingBufferMap, so the stack depot must be unmapped before the ring
    // buffer; the order is very important.
    RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
                               RB->RawStackDepotMap.getCapacity());
    // Note that the `RB->RawRingBufferMap` is stored on the pages managed by
    // itself. Take over the ownership before calling unmap() so that any
    // operation along with unmap() won't touch inaccessible pages.
    MemMapT RawRingBufferMap = RB->RawRingBufferMap;
    RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
                           RawRingBufferMap.getCapacity());
    atomic_store(&RingBufferAddress, 0, memory_order_release);
  }

  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
    return sizeof(AllocationRingBuffer) +
           RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
  }

  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
    if (Bytes < sizeof(AllocationRingBuffer)) {
      return 0;
    }
    return (Bytes - sizeof(AllocationRingBuffer)) /
           sizeof(typename AllocationRingBuffer::Entry);
  }
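
  // The two helpers above are inverses over whole entries (illustrative sizes,
  // assuming a 48-byte AllocationRingBuffer header and 40-byte entries rather
  // than the real sizeof values): ringBufferSizeInBytes(1000) would be
  // 48 + 1000 * 40 == 40048, and ringBufferElementsFromBytes(40048) gives
  // (40048 - 48) / 40 == 1000. getRingBufferErrorInfo() uses the second form
  // because it only receives the raw byte size of the copied-out ring buffer.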
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_