//===-- asan_allocator.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to a quarantine (FIFO list).
// After quarantine, a chunk is returned to the freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
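//
// A rough sketch of the chunk layout (assuming the typical case where the
// user memory starts exactly at 'this' + REDZONE; memalign can shift it
// right, and size-class rounding can make the right redzone longer):
//
//   chunk                     Beg()               Beg()+used_size
//   |                         |                   |
//   | ChunkBase | alloc stack |    user memory    | right redzone |
//   |<-------- REDZONE ------>|<--- used_size --->|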
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_atomic.h"

#if defined(_WIN32) && !defined(__clang__)
#include <intrin.h>
#endif

namespace __asan {

#define REDZONE ((uptr)(flags()->redzone))
static const uptr kMinAllocSize = REDZONE * 2;
static const u64 kMaxAvailableRam = 128ULL << 30;  // 128G
static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M

static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const uptr kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Chunk sizes up to kMallocSizeClassStep are rounded up to powers of two.
// All larger size classes are multiples of kMallocSizeClassStep.
static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
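// With kMallocSizeClassStepLog == 26, requests up to 64M thus fall into
// power-of-two classes (class k holds chunks of 1 << k bytes) and larger
// requests into classes spaced kMallocSizeClassStep apart.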

static const uptr kMaxAllowedMallocSize =
    (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}

static inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = __WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}
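// For example, RoundUpToPowerOfTwo(100) computes up == 6
// (since 1 << 6 <= 100 < 1 << 7) and returns 1 << 7 == 128.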

static inline uptr SizeClassToSize(u8 size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline u8 SizeToSizeClass(uptr size) {
  u8 res = 0;
  if (size <= kMallocSizeClassStep) {
    uptr rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
          + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
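// Worked round trip: SizeToSizeClass(192) rounds 192 up to 256 == 1 << 8,
// giving class 8, and SizeClassToSize(8) == 256. For a large request,
// SizeToSizeClass((64UL << 20) + 1) == 2 + 26 == 28, and
// SizeClassToSize(28) == 2 * kMallocSizeClassStep == 128M.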

// Given a REDZONE-sized memory region, mark the first 'size' bytes as
// addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}

static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
  CHECK(IsAligned(size, kPageSize));
  u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
  if (flags()->debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case the actual chunk resides at
// this - this->used_size).
//
// The magic numbers for the enum values were chosen at random.
enum {
  CHUNK_AVAILABLE = 0x57,
  CHUNK_ALLOCATED = 0x32,
  CHUNK_QUARANTINE = 0x19,
  CHUNK_MEMALIGN = 0xDC
};
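
// A chunk's lifecycle is thus:
//   CHUNK_AVAILABLE  -> CHUNK_ALLOCATED  (Allocate)
//   CHUNK_ALLOCATED  -> CHUNK_QUARANTINE (Deallocate)
//   CHUNK_QUARANTINE -> CHUNK_AVAILABLE  (QuarantinePop, once the quarantine
//                                         grows past its size limit)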

struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state : 8;
  uptr alloc_tid : 24;
  uptr size_class : 8;
  uptr free_tid : 24;

  // Second 8 bytes.
  uptr alignment_log : 8;
  uptr used_size : FIRST_32_SECOND_64(32, 56);  // Size requested by the user.

  // This field may overlap with the user area and thus should not
  // be used while the chunk is in CHUNK_ALLOCATED state.
  AsanChunk *next;

  // Typically the beginning of the user-accessible memory is 'this'+REDZONE
  // and is also aligned by REDZONE. However, if the memory is allocated
  // by memalign, the alignment might be higher and the user-accessible memory
  // starts at the first properly aligned address after 'this'.
  uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
  uptr Size() { return SizeClassToSize(size_class); }
  u8 SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  u32 *compressed_alloc_stack() {
    return (u32*)((uptr)this + sizeof(ChunkBase));
  }
  u32 *compressed_free_stack() {
    return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  uptr compressed_alloc_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
  }
  uptr compressed_free_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return REDZONE / sizeof(u32);
  }

  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= (Beg() + used_size)) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
    if (addr + access_size >= Beg() + used_size) {
      if (addr <= Beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (Beg() + used_size);
      return true;
    }
    return false;
  }

  void DescribeAddress(uptr addr, uptr access_size) {
    uptr offset;
    Printf("%p is located ", (void*)addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%zu bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%zu bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%zu bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    Printf(" %zu-byte region [%p,%p)\n",
           used_size, (void*)Beg(), (void*)(Beg() + used_size));
  }
};

static AsanChunk *PtrToChunk(uptr ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = (AsanChunk*)((uptr)m - m->used_size);
  }
  return m;
}
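
// Note: for a pointer returned by memalign with alignment > REDZONE, the
// header at ptr - REDZONE is a CHUNK_MEMALIGN pseudo-chunk whose used_size
// holds the distance back to the real chunk header (see the memalign path
// in Allocate below).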

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
    CHECK(first_);
  }
  CHECK(last_);
  CHECK(!last_->next);
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from cache
// long ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == 0)
    last_ = 0;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}

// A contiguous range of mmap-ed pages, all holding chunks of the same size.
struct PageGroup {
  uptr beg;
  uptr end;
  uptr size_of_chunk;
  uptr last_chunk;
  bool InRange(uptr addr) {
    return addr >= beg && addr < end;
  }
};
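
// PageGroups are registered in MallocInfo::page_groups_ below and sorted
// lazily by address (see FindPageGroupUnlocked), so the group owning an
// address can be located with a binary search.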

class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
    AsanChunk *m = 0;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (uptr i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(flags()->quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > (uptr)flags()->quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uptr addr, uptr access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  uptr AllocationSize(uptr ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // Make sure this is our chunk and |ptr| actually points to the beginning
    // of the allocated memory.
    AsanChunk *m = FindChunkByAddr(ptr);
    if (!m || m->Beg() != ptr) return 0;

    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    uptr malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      uptr t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uptr addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uptr addr) {
    int n = atomic_load(&n_page_groups_, memory_order_relaxed);
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uptr*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uptr g = (uptr)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // cannot belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return 0;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return 0;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    uptr l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uptr addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uptr offset_from_beg = addr - g->beg;
    uptr this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    uptr offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    uptr size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(u8 size_class) {
    uptr size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    uptr mmap_size = Max(size, kMinMmapSize);
    uptr n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += kPageSize;
    }
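    // Either way the tail of the mapping is left poisoned; the PageGroup
    // descriptor for this mapping is stored there, just past the last
    // usable chunk (see below).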
    CHECK(n_chunks > 0);
    u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = 0;
    for (uptr i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uptr)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
    int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
    CHECK(idx < (int)ARRAY_SIZE(page_groups_));
    page_groups_[idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  atomic_uint32_t n_page_groups_;
  int n_sorted_page_groups_;
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

void DescribeHeapAddress(uptr addr, uptr access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  StackTrace alloc_stack;
  StackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                              m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid != kInvalidTid) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    StackTrace free_stack;
    StackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
    PrintStack(&free_stack);
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());
    PrintStack(&alloc_stack);
    DescribeThread(t->summary());
    DescribeThread(free_thread);
    DescribeThread(alloc_thread);
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    PrintStack(&alloc_stack);
    DescribeThread(t->summary());
    DescribeThread(alloc_thread);
  }
}

static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rounded_size = RoundUpTo(size, REDZONE);
  uptr needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
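  // Worked example (illustrative values): with REDZONE == 64, size == 100
  // and alignment <= REDZONE, rounded_size == 128 and needed_size == 192,
  // which SizeToSizeClass below maps to class 8 (256-byte chunks).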
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  u8 size_class = SizeToSizeClass(needed_size);
  uptr size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (flags()->verbosity >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics.
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = 0;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // Get directly from the global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // Get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = 0;
  CHECK(m->Size() == size_to_allocate);
  uptr addr = (uptr)m + REDZONE;
  CHECK(addr <= (uptr)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->used_size = (uptr)p - (uptr)m;
    m->alignment_log = Log2(alignment);
    CHECK(m->Beg() == addr);
  } else {
    m->alignment_log = Log2(REDZONE);
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  CHECK(m->Beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = kInvalidTid;
  StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                            m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= (uptr)(flags()->max_malloc_fill_size)) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (u8*)addr;
}

static void Deallocate(u8 *ptr, StackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (flags()->debug) {
    CHECK(malloc_info.FindPageGroup((uptr)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uptr)ptr);

  // Flip the chunk_state atomically to avoid a race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    ReportDoubleFree((uptr)ptr, stack);
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
  CHECK(REDZONE <= 16 || !m->next);
  CHECK(m->free_tid == kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->compressed_free_stack(),
                            m->compressed_free_stack_size());
  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);

  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}

static u8 *Reallocate(u8 *old_ptr, uptr new_size,
                      StackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uptr)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->used_size;
  uptr memcpy_size = Min(new_size, old_size);
  u8 *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"

namespace __asan {

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  __asan_free_hook(ptr);
  Deallocate((u8*)ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  __asan_malloc_hook(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0) {
    void *ptr = (void*)Allocate(0, size, stack);
    __asan_malloc_hook(ptr, size);
    return ptr;
  } else if (size == 0) {
    __asan_free_hook(p);
    Deallocate((u8*)p, stack);
    return 0;
  }
  return Reallocate((u8*)p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  __asan_malloc_hook(ptr, size);
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uptr)ptr);
}

void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack -------------------- {{{1
FakeStack::FakeStack() {
  CHECK(REAL(memset) != 0);
  REAL(memset)(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
  uptr mem = allocated_size_classes_[size_class];
  uptr size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uptr FakeStack::AddrIsInFakeStack(uptr addr) {
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
  uptr log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log - 1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log - 1)));
  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
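// For example, a 100-byte frame rounds up to 128 == 1 << 7, so it falls
// into size class 7 - kMinStackFrameSizeLog (or class 0 if 7 is below
// kMinStackFrameSizeLog).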

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(uptr stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    uptr mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      UnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

uptr FakeStack::ClassMmapSize(uptr size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(uptr size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uptr new_mem = (uptr)MmapOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  uptr i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  uptr size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
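  // Garbage-collect fake frames whose real frames sit at or below ours on
  // the real stack: those functions have already returned, so their fake
  // frames were presumably left behind (e.g. by longjmp).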
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uptr ptr = (uptr)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  uptr size = fake_frame->size_minus_one + 1;
  uptr size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uptr ptr = (uptr)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

uptr __asan_stack_malloc(uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uptr)p) > 0;
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_HERE(kStackTraceMax);
    ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
  }
  return allocated_size;
}