// NOTE(review): code-browser navigation chrome ("Home / Line# / Scopes / ...")
// removed — it is scraper residue, not part of the original source file.
1 //===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // A fast memory allocator that does not support free() nor realloc().
11 // All allocations are forever.
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
15 #define SANITIZER_PERSISTENT_ALLOCATOR_H
16 
17 #include "sanitizer_internal_defs.h"
18 #include "sanitizer_mutex.h"
19 #include "sanitizer_atomic.h"
20 #include "sanitizer_common.h"
21 
22 namespace __sanitizer {
23 
class PersistentAllocator {
 public:
  // Returns 'size' bytes of memory that is never freed: allocations are
  // carved out of mmap'ed superblocks and live for the process lifetime.
  void *alloc(uptr size);

 private:
  // Lock-free fast path: tries to bump region_pos by 'size'. Returns
  // nullptr when the current region is exhausted or when region_pos == 0
  // (a refill is in progress under 'mtx'), in which case alloc() retries.
  void *tryAlloc(uptr size);
  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
  atomic_uintptr_t region_pos;  // Region allocator for Node's.
  atomic_uintptr_t region_end;  // One past the last usable byte of the region.
};
34 
tryAlloc(uptr size)35 inline void *PersistentAllocator::tryAlloc(uptr size) {
36   // Optimisic lock-free allocation, essentially try to bump the region ptr.
37   for (;;) {
38     uptr cmp = atomic_load(&region_pos, memory_order_acquire);
39     uptr end = atomic_load(&region_end, memory_order_acquire);
40     if (cmp == 0 || cmp + size > end) return nullptr;
41     if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
42                                      memory_order_acquire))
43       return (void *)cmp;
44   }
45 }
46 
// Returns a block of 'size' bytes; the memory is never freed or reused.
inline void *PersistentAllocator::alloc(uptr size) {
  // First, try to allocate optimistically (lock-free fast path).
  void *s = tryAlloc(size);
  if (s) return s;
  // If failed, lock, retry and alloc new superblock.
  SpinMutexLock l(&mtx);
  for (;;) {
    s = tryAlloc(size);
    if (s) return s;
    // Park concurrent tryAlloc() callers: a zero region_pos makes them fail
    // and spin back here until the new region is published below.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    // Map at least a 64K superblock (larger if a single request needs it).
    // Any leftover space in the old region is simply abandoned — nothing is
    // ever unmapped, by design of this allocator.
    uptr allocsz = 64 * 1024;
    if (allocsz < size) allocsz = size;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    // Publish order matters: region_end must be visible before a non-zero
    // region_pos, so tryAlloc() never pairs the new cursor with a stale end.
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
64 
// Global allocator instance; 'extern' here, defined in exactly one TU.
extern PersistentAllocator thePersistentAllocator;
// Convenience free function forwarding to the global instance.
// NOTE(review): the mmap tag "stack depot" suggests the stack depot is the
// primary client — confirm against callers.
inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}
69 
70 } // namespace __sanitizer
71 
72 #endif // SANITIZER_PERSISTENT_ALLOCATOR_H
73