//===-- asan_allocator2.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
17 #include "asan_allocator.h"
18
19 #include "asan_mapping.h"
20 #include "asan_poisoning.h"
21 #include "asan_report.h"
22 #include "asan_thread.h"
23 #include "sanitizer_common/sanitizer_allocator.h"
24 #include "sanitizer_common/sanitizer_flags.h"
25 #include "sanitizer_common/sanitizer_internal_defs.h"
26 #include "sanitizer_common/sanitizer_list.h"
27 #include "sanitizer_common/sanitizer_stackdepot.h"
28 #include "sanitizer_common/sanitizer_quarantine.h"
29 #include "lsan/lsan_common.h"
30
namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize>,
  AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
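// The combined allocator serves small requests from the size-class-based
// primary allocator and large ones from the mmap-based secondary allocator;
// AsanMapUnmapCallback keeps the shadow poisoned/flushed on both paths.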

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
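// For example, RZLog2Size(0) == 16 and RZLog2Size(7) == 2048, so
// RZSize2Log(32) == 1 and RZSize2Log(2048) == 7.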

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
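// E.g. a 100-byte request falls into the "<= 512 - 64" bucket, so rz_log is 2
// and the redzone is 64 bytes, unless flags()->redzone asks for a larger one.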

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
// U -- user memory.
// R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L H H U U U U U U
//   |                   ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
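// GetAsanChunk() below relies on this: when the ChunkHeader does not sit right
// at the beginning of the block returned by the underlying allocator (larger
// left redzone or extra alignment), the magic/back-pointer pair is what leads
// it from alloc_beg to the header.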
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};
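// chunk_state occupies the first byte of the header so that the atomic
// operations on (atomic_uint8_t*)m used below address exactly that byte.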

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
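// The smallest redzone is 16 bytes (rz_log == 0), so the 16-byte ChunkHeader
// always fits inside the left redzone that precedes the user memory.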

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
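// The fallback cache and quarantine are used, under fallback_mutex, whenever
// GetCurrentThread() returns null and no per-thread malloc storage exists.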

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
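  // The extra 'alignment' bytes added above guarantee that user_beg can be
  // rounded up to the requested alignment further below and still leave room
  // for the requested size inside the block.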
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
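  // The partial shadow byte written above records how many bytes of the last
  // granule are addressable, e.g. with 8-byte granularity and size == 13 the
  // last shadow byte becomes 5.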

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
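  // The release store pairs with acquire reads of chunk_state (e.g. the CAS in
  // AtomicallySetQuarantineFlag), so a thread that observes CHUNK_ALLOCATED
  // also sees the fully initialized header.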
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
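// The chunk header is located in one of three ways: for secondary (mmap-ed)
// allocations it is recorded in the allocator metadata; if the block starts
// with kAllocBegMagic, the following word points to the header; otherwise the
// header sits right at alloc_beg.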
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator, there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif