//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() or realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

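// Descriptive note (added): region_pos is bumped towards region_end on the
// lock-free fast path (tryAlloc); when the current region is exhausted,
// alloc() takes mtx and maps a fresh superblock. Mapped memory is never
// returned to the OS, which is what makes the allocator "persistent".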
class PersistentAllocator {
 public:
  void *alloc(uptr size);

 private:
  void *tryAlloc(uptr size);
  StaticSpinMutex mtx;  // Protects allocation of new blocks for the region allocator.
  atomic_uintptr_t region_pos;  // Region allocator for Nodes.
  atomic_uintptr_t region_end;
};

inline void *PersistentAllocator::tryAlloc(uptr size) {
  // Optimistic lock-free allocation: essentially, try to bump the region
  // pointer.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    if (cmp == 0 || cmp + size > end) return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return (void *)cmp;
  }
}

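// The fast path above is a classic CAS-based bump-pointer allocation. For
// illustration only (this sketch is not part of the sanitizer runtime), the
// same technique expressed with the C++ standard library might look like the
// hypothetical TryBump below; 'pos' and 'end' mirror region_pos/region_end.
//
//   #include <atomic>
//   #include <cstdint>
//
//   std::atomic<std::uintptr_t> pos{0}, end{0};
//
//   void *TryBump(std::uintptr_t size) {
//     std::uintptr_t cmp = pos.load(std::memory_order_acquire);
//     for (;;) {
//       // No region installed yet, or not enough room left in it.
//       if (cmp == 0 || cmp + size > end.load(std::memory_order_acquire))
//         return nullptr;
//       // On success we own [cmp, cmp + size); on failure cmp is refreshed
//       // with the current value of pos and we retry.
//       if (pos.compare_exchange_weak(cmp, cmp + size,
//                                     std::memory_order_acquire))
//         return reinterpret_cast<void *>(cmp);
//     }
//   }
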
inline void *PersistentAllocator::alloc(uptr size) {
  // First, try to allocate optimistically.
  void *s = tryAlloc(size);
  if (s) return s;
  // If that failed, take the lock, retry, and allocate a new superblock.
  SpinMutexLock l(&mtx);
  for (;;) {
    s = tryAlloc(size);
    if (s) return s;
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr allocsz = 64 * 1024;
    if (allocsz < size) allocsz = size;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}

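// Notes on the slow path above: region_pos is reset to 0 under mtx so that
// concurrent fast-path callers fail their tryAlloc and queue up on the same
// lock; the new superblock is then published with release stores (region_end
// first, region_pos last). Whatever was left unused in the previous region is
// simply abandoned, which is acceptable because nothing is ever freed. The
// superblock size is max(64 KiB, size): a 200 KiB request, for example, gets
// its own 200 KiB mapping.
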
extern PersistentAllocator thePersistentAllocator;
inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}

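// Usage sketch (illustrative only): callers keep the returned pointer and
// never free it. Code that must retain a stack trace for the lifetime of the
// process might do something like the following, where 'trace' and 'size' are
// hypothetical and internal_memcpy is the runtime's memcpy replacement:
//
//   uptr *stored = (uptr *)PersistentAlloc(size * sizeof(uptr));
//   internal_memcpy(stored, trace, size * sizeof(uptr));
//   // 'stored' stays valid forever; there is no matching free().
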
} // namespace __sanitizer

#endif // SANITIZER_PERSISTENT_ALLOCATOR_H