//===-- tsan_mman.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}
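
// The hooks above are WEAK so that an instrumented program (the "front-end")
// can observe heap activity by providing its own strong definitions. A minimal
// sketch of such an override in user code (not part of this runtime; the
// counter is made up for illustration, and uptr is assumed to be
// unsigned long on the target platform):
//
//   static unsigned long total_allocated;  // hypothetical user-side counter
//   extern "C" void __tsan_malloc_hook(void *ptr, unsigned long size) {
//     __sync_fetch_and_add(&total_allocated, size);
//   }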

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);

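// MBlock doubles as a tiny spin lock: the low bit of its first word is the
// lock flag. Lock() spins (proc_yield first, then sched_yield) until it can
// CAS the bit from 0 to 1 with acquire ordering; Unlock() clears the bit.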
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  // Release ordering is needed to publish the writes made under the lock.
  atomic_store(a, v & ~1, memory_order_release);
}

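// Callback passed to the allocator: when a region of user heap is returned
// to the OS, the corresponding shadow memory is released as well.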
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

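// The allocator object is kept in a raw, cache-line-aligned static buffer and
// initialized explicitly by InitializeAllocator(), so no C++ static
// constructor has to run inside the runtime.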
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

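// Reports a "signal-unsafe call" if malloc/free is reached from inside a
// signal handler (controlled by the report_signal_unsafe flag).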
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}

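// Allocation path for user malloc/new: get memory from the allocator, set up
// the MBlock metadata (size, allocating thread, allocation stack id), and
// imitate a write to the whole range so that unsynchronized use of the fresh
// memory by another thread is reported as a race with the allocation.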
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return 0;
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (CTX() && CTX()->initialized)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

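// Free path: destroy any SyncVar objects still attached to the block, mark
// the range as freed (so later accesses are reported as use-after-free), and
// return the memory to the allocator.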
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1)
    MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

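// realloc is implemented as allocate-new + copy + free-old;
// realloc(p, 0) simply frees p and returns 0.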
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

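// Maps a pointer anywhere inside a heap block to that block's MBlock
// metadata; returns 0 if the pointer does not belong to the allocator.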
MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}

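// The user hooks are only invoked once the runtime is fully initialized and
// only for allocations that originate in user code (thr->in_rtl == 0), so
// internal runtime allocations never reach them.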
void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}

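// Allocations made by the runtime itself go through a separate internal
// allocator. thr->nomalloc guards regions where the runtime must not
// allocate; tripping it is a fatal error.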
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

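// Heap introspection entry points, following the common sanitizer allocator
// interface. Values are derived from allocator statistics; quantities that
// are not tracked return a dummy value of 1.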
221 extern "C" {
__tsan_get_current_allocated_bytes()222 uptr __tsan_get_current_allocated_bytes() {
223 u64 stats[AllocatorStatCount];
224 allocator()->GetStats(stats);
225 u64 m = stats[AllocatorStatMalloced];
226 u64 f = stats[AllocatorStatFreed];
227 return m >= f ? m - f : 1;
228 }
229
__tsan_get_heap_size()230 uptr __tsan_get_heap_size() {
231 u64 stats[AllocatorStatCount];
232 allocator()->GetStats(stats);
233 u64 m = stats[AllocatorStatMmapped];
234 u64 f = stats[AllocatorStatUnmapped];
235 return m >= f ? m - f : 1;
236 }
237
__tsan_get_free_bytes()238 uptr __tsan_get_free_bytes() {
239 return 1;
240 }
241
__tsan_get_unmapped_bytes()242 uptr __tsan_get_unmapped_bytes() {
243 return 1;
244 }
245
__tsan_get_estimated_allocated_size(uptr size)246 uptr __tsan_get_estimated_allocated_size(uptr size) {
247 return size;
248 }
249
__tsan_get_ownership(void * p)250 bool __tsan_get_ownership(void *p) {
251 return allocator()->GetBlockBegin(p) != 0;
252 }
253
__tsan_get_allocated_size(void * p)254 uptr __tsan_get_allocated_size(void *p) {
255 if (p == 0)
256 return 0;
257 p = allocator()->GetBlockBegin(p);
258 if (p == 0)
259 return 0;
260 MBlock *b = (MBlock*)allocator()->GetMetaData(p);
261 return b->Size();
262 }
263
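// Called when a thread has been idle for a while: returns the thread-local
// allocator caches to the central allocator so the memory can be reused.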
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}
}  // extern "C"