//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

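// Per-chunk bookkeeping, stored in the allocator's metadata area next to each
// user chunk (see Metadata() below).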
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

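// The global allocator instance and its per-thread cache. The cache is
// drained back into the allocator when a thread exits (AllocatorThreadFinish).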
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
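  // Flip the 'allocated' flag (the first byte of the metadata) last, so the
  // chunk only becomes visible once the other fields are filled in.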
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
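  // Clear the 'allocated' flag before the chunk is returned to the allocator.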
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

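// Returns the address range occupied by the thread-local allocator cache.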
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
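  // Out-of-bounds pointers produced by "new T[0]" (for T with a destructor)
  // are still treated as pointing into the chunk.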
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

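// User memory starts at the beginning of the chunk; metadata lives elsewhere.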
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan