//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# endif
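
// Note: on Linux (non-Android), __libc_malloc/__libc_free are called directly
// so that these internal allocations bypass the sanitizer's own malloc
// interceptors and cannot recurse back into the instrumented allocator.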

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

// There is no internal allocator instance in this mode; internal allocations
// go straight to libc.
InternalAllocator *internal_allocator() {
  return 0;
}

#else // SANITIZER_GO

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

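// Returns the singleton internal allocator, lazily constructing it in the
// static placeholder buffer on first use. Double-checked locking keeps the
// common path down to a single acquire load.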
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(/* may_return_null */ false);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    // No per-thread cache provided: serialize on the shared cache.
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif // SANITIZER_GO

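// Every block handed out by InternalAlloc is prefixed with an 8-byte magic
// word, letting InternalFree verify that a pointer really came from
// InternalAlloc before releasing it.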
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (size + sizeof(u64) < size)  // Adding the header would overflow uptr.
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (!p)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}
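
// A minimal usage sketch (hypothetical caller; the size and buffer name are
// illustrative, and this assumes the declarations in
// sanitizer_allocator_internal.h default the cache argument to null --
// otherwise pass nullptr explicitly). With a null cache, the shared cache is
// used under a spin lock:
//
//   void *buf = InternalAlloc(128);
//   CHECK(buf);
//   // ... use buf ...
//   InternalFree(buf);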

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;  // Clear the magic so a double free is caught.
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ = (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}
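
// Note: this is a bump allocator over pages obtained with MmapOrDie. It never
// unmaps anything, and whatever space remains at the end of the current
// region is simply abandoned when a new region has to be mapped.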

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
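
// A hedged registration sketch (the callback name is illustrative): a tool
// can observe every low-level mapping, e.g. to account for it in its own
// bookkeeping. The (uptr ptr, uptr size) signature matches the call site in
// LowLevelAllocator::Allocate above.
//
//   static void OnLowLevelAllocate(uptr ptr, uptr size) {
//     // e.g. record [ptr, ptr + size) in the tool's own metadata.
//   }
//   ...
//   SetLowLevelAllocateCallback(OnLowLevelAllocate);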

bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}
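
// Worked example: on a 64-bit target max == 2^64 - 1, so with size == 8 the
// quotient max / size is 2^61 - 1. Any n >= 2^61 makes size * n overflow a
// 64-bit uptr, and the function returns true for exactly those n.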

void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

} // namespace __sanitizer