1 //===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of AddressSanitizer, an address sanity checker.
11 //
12 // Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
14 // has a left redzone of REDZONE bytes and
15 // a right redzone such that the end of the chunk is aligned by REDZONE
16 // (i.e. the right redzone is between 0 and REDZONE-1).
17 // The left redzone is always poisoned.
18 // The right redzone is poisoned on malloc, the body is poisoned on free.
19 // Once freed, a chunk is moved to a quarantine (fifo list).
20 // After quarantine, a chunk is returned to freelists.
21 //
22 // The left redzone contains ASan's internal data and the stack trace of
23 // the malloc call.
24 // Once freed, the body of the chunk contains the stack trace of the free call.
25 //
26 //===----------------------------------------------------------------------===//
27
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"

#include <errno.h>

#ifdef _WIN32
#include <intrin.h>
#endif
41
42 namespace __asan {
43
// Left redzone size in bytes; a runtime flag so it can be tuned without
// recompiling.
#define REDZONE FLAG_redzone
// Smallest chunk ever handed out: left redzone plus one REDZONE of payload.
static const size_t kMinAllocSize = REDZONE * 2;
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
// A thread-local quarantine larger than this is flushed to the global one.
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M

// Granularity of the mmap calls that back the allocator.
static const size_t kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
// Chunks at least this large bypass the thread-local free lists.
static const size_t kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

// Requests above this limit fail with a warning instead of allocating.
static const size_t kMaxAllowedMallocSize =
    (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
60
// True iff 'a' is a multiple of 'alignment'.  The mask trick requires
// 'alignment' to be a power of two.
static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  const uintptr_t mask = alignment - 1;
  return (a & mask) == 0;
}
64
// Log base 2 of a power-of-two 'x' (checked).  Bit-scan intrinsics are used
// on MSVC (32- and 64-bit) since __builtin_ctzl is GCC/Clang-only.
static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
#if defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#elif defined(_WIN32)
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#else
  // For a power of two, the count of trailing zeros equals log2.
  return __builtin_ctzl(x);
#endif
}
79
// Smallest power of two that is >= 'size' ('size' must be non-zero).
static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  // Find the index of the highest set bit.
  unsigned long up;  // NOLINT
#if defined(_WIN64)
  _BitScanReverse64(&up, size);
#elif defined(_WIN32)
  _BitScanReverse(&up, size);
#else
  up = __WORDSIZE - 1 - __builtin_clzl(size);
#endif
  // 'size' is strictly between 2^up and 2^(up+1); round up to the latter.
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}
96
SizeClassToSize(uint8_t size_class)97 static inline size_t SizeClassToSize(uint8_t size_class) {
98 CHECK(size_class < kNumberOfSizeClasses);
99 if (size_class <= kMallocSizeClassStepLog) {
100 return 1UL << size_class;
101 } else {
102 return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
103 }
104 }
105
// Maps a byte size to its size class; inverse (upper bound) of
// SizeClassToSize, as verified by the trailing CHECK.
static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    // Small sizes: class is log2 of the next power of two.
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    // Large sizes: class counts multiples of kMallocSizeClassStep,
    // offset past the power-of-two classes.
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
          + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
119
// Given REDZONE bytes, we need to mark first size bytes
// as addressable and the rest REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
131
// Maps 'size' fresh bytes (page-aligned) and poisons their entire shadow
// with the heap-left-redzone magic; chunks carved from this memory later
// unpoison just their user-visible parts.
static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}
141
142 // Every chunk of memory allocated by this allocator can be in one of 3 states:
143 // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
144 // CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
145 // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
146 //
147 // The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
148 // the beginning of a AsanChunk (in which case 'next' contains the address
149 // of the AsanChunk).
150 //
151 // The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE = 0x573B,   // In a free list, ready to be handed out.
  CHUNK_ALLOCATED = 0x3204,   // Owned by the user, not yet freed.
  CHUNK_QUARANTINE = 0x1978,  // Freed, sitting in the quarantine FIFO.
  CHUNK_MEMALIGN = 0xDC68,    // Stub header; 'next' points to the real chunk.
};
158
// Header stored at the start of every chunk's left redzone.
struct ChunkBase {
  uint16_t chunk_state;  // One of the CHUNK_* magic values above.
  uint8_t size_class;    // Index encoding the chunk's full capacity.
  uint32_t offset;  // User-visible memory starts at this+offset (beg()).
  int32_t alloc_tid;  // Thread that allocated the chunk.
  int32_t free_tid;   // Thread that freed it (kInvalidTid while live).
  size_t used_size;  // Size requested by the user.
  AsanChunk *next;   // Free-list / quarantine link.

  // Address of the user-visible payload.
  uintptr_t beg() { return (uintptr_t)this + offset; }
  // Full capacity of the chunk (payload plus redzones).
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};
172
// Full chunk header: adds storage helpers for the compressed stack traces
// and predicates locating an address relative to the user region.
struct AsanChunk: public ChunkBase {
  // The compressed malloc stack lives in the left redzone, immediately
  // after the ChunkBase header.
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  // The compressed free stack is written into the chunk body itself
  // (this+REDZONE == beg() when no memalign offset); the user memory is
  // dead once the chunk has been freed.
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
  }

  // True if [addr, addr+access_size) lies entirely inside the user region;
  // *offset is then the distance from the region start.
  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  // True if 'addr' falls before the user region (left redzone);
  // *offset is the distance to the region start.
  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  // True if the access reaches or passes the user region's end; *offset is
  // how far past the end it starts (0 if it starts at or before the end).
  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }

  // Prints where 'addr' lies relative to this chunk's user region.
  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%zu bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%zu bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%zu bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is AddressSanitizer bug!)");
    }
    Printf(" %zu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};
234
PtrToChunk(uintptr_t ptr)235 static AsanChunk *PtrToChunk(uintptr_t ptr) {
236 AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
237 if (m->chunk_state == CHUNK_MEMALIGN) {
238 m = m->next;
239 }
240 return m;
241 }
242
243
// Splices all of q's chunks onto the tail of this list in O(1) and
// empties q.  q must be non-empty.
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    // This list was empty: adopt q's endpoints wholesale.
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
    CHECK(first_);
  }
  CHECK(last_);
  CHECK(!last_->next);
  size_ += q->size();
  q->clear();
}
262
Push(AsanChunk * n)263 void AsanChunkFifoList::Push(AsanChunk *n) {
264 CHECK(n->next == NULL);
265 if (last_) {
266 CHECK(first_);
267 CHECK(!last_->next);
268 last_->next = n;
269 last_ = n;
270 } else {
271 CHECK(!first_);
272 last_ = first_ = n;
273 }
274 size_ += n->Size();
275 }
276
// Removes and returns the oldest chunk.  The list must be non-empty.
// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from
// cache long time ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;  // The list just became empty.
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}
293
// All pages we ever allocated.
// Descriptor of one mmaped region; every chunk carved from a region has
// the same size, which makes addr -> chunk lookup pure arithmetic.
struct PageGroup {
  uintptr_t beg;         // First byte of the region.
  uintptr_t end;         // One past the last byte.
  size_t size_of_chunk;  // Uniform chunk size within this region.
  uintptr_t last_chunk;  // Address of the last chunk in the region.
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};
304
// Global part of the allocator: per-size-class free lists, the global
// quarantine FIFO, and the registry of all mmaped regions (PageGroups).
// Public methods take mu_; the *Unlocked helpers assume it is already held.
class MallocInfo {
 public:

  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  // Detaches n_chunks chunks of 'size_class' from the global free list
  // (mmaping fresh memory when the list runs dry) and returns them as a
  // singly-linked list.
  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  // Merges a thread's local state into the global structures: its
  // quarantine is spliced into the global quarantine (which is then trimmed
  // back down to FLAG_quarantine_size) and, when eat_free_lists is set
  // (thread exit), its free lists are folded into the global ones too.
  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  // Puts a chunk straight into the global quarantine; used when the freeing
  // thread has no thread-local malloc storage.
  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  // Returns the chunk (in any state) that best matches 'addr', or NULL if
  // the address does not belong to this allocator.
  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  // User-requested size of a live allocation; 0 if 'ptr' is not a
  // currently-allocated chunk owned by this allocator.
  size_t AllocationSize(uintptr_t ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // first, check if this is our memory
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  // Explicit lock/unlock pair, e.g. for malloc-zone introspection.
  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  // Prints the quarantine size and per-class free-list totals (in MB).
  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  // Binary search over the (lazily sorted) page group array.  The PageGroup
  // descriptor itself lives at the end of its region, so comparing 'addr'
  // against the PageGroup pointer values orders it correctly.
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    int n = n_page_groups_;
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uintptr_t*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uintptr_t g = (uintptr_t)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // may not belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return NULL;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // Locates the chunk covering 'addr'.  All chunks within a page group
  // share one size, so arithmetic finds the candidate chunk; addresses in
  // a redzone shared by two neighbors are arbitrated by ChooseChunk().
  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  // Takes the oldest chunk out of quarantine and returns it to its free
  // list, making the memory available for reuse.
  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    // Thread the fresh chunks into a list.
    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    // The PageGroup descriptor lives in the sacrificed space at the end of
    // the region.
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];  // Per-class free chunks.
  AsanChunkFifoList quarantine_;  // Global FIFO of freed chunks.
  AsanLock mu_;                   // Guards all the state above.

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
  int n_sorted_page_groups_;
};
568
// The single global allocator instance (linker-initialized so it is usable
// before static constructors run).
static MallocInfo malloc_info(LINKER_INITIALIZED);
570
// Called when a thread dies: hand its quarantined chunks and free lists
// back to the global allocator.
void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
574
// Prints a detailed report about 'addr' if it belongs to the heap: its
// position relative to the owning chunk plus the malloc (and, if freed,
// the free) stack traces and the threads involved.
static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    // The chunk was freed: show both the free and the allocation stacks.
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());

    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    // Still allocated: only the allocation stack is available.
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}
609
// Core allocation routine shared by all malloc-family entry points.
// Returns 'size' bytes whose address satisfies 'alignment' (0 means
// default), or 0 if the request exceeds kMaxAllowedMallocSize.
// Chunk layout: [left redzone (ChunkBase + alloc stack) | user memory |
// right redzone], with the whole chunk being one size-class-sized block.
static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    // Reserve slack so the user pointer can be slid up to 'alignment'.
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      // Refill the local free list in bulk to amortize the global lock.
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    // Slide 'addr' up to the requested alignment and plant a CHUNK_MEMALIGN
    // stub just below it so PtrToChunk() can find the real header.
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  // Unpoison the user memory; if the requested size is not a multiple of
  // REDZONE, poison the tail of the last granule.
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}
696
// Core deallocation routine: detects double-free and wild free, poisons the
// user memory, then places the chunk in quarantine (thread-local first;
// global when there is no current thread).
static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);

  // Flip the state atomically to avoid race on double-free.
  uint16_t old_chunk_state = AtomicExchange(&m->chunk_state, CHUNK_QUARANTINE);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  // The free stack is stored in the chunk body, which is dead memory now.
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    // Flush an oversized thread-local quarantine to the global one.
    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
752
// realloc() core: allocates a new block, copies min(old, new) bytes, then
// frees the old one.  If the new allocation fails, the old block is left
// untouched (standard realloc semantics) and NULL is returned.
static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = Min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != NULL);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}
774
775 } // namespace __asan
776
777 // Malloc hooks declaration.
778 // ASAN_NEW_HOOK(ptr, size) is called immediately after
779 // allocation of "size" bytes, which returned "ptr".
780 // ASAN_DELETE_HOOK(ptr) is called immediately before
781 // deallocation of "ptr".
782 // If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, user
783 // program must provide implementation of this hook.
784 // If macro is undefined, the hook is no-op.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
// Default no-op used when the user program did not define the hook.
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
// Default no-op used when the user program did not define the hook.
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
796
797 namespace __asan {
798
asan_memalign(size_t alignment,size_t size,AsanStackTrace * stack)799 void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
800 void *ptr = (void*)Allocate(alignment, size, stack);
801 ASAN_NEW_HOOK(ptr, size);
802 return ptr;
803 }
804
// free() replacement.  The delete hook runs before the memory is poisoned.
void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}
809
asan_malloc(size_t size,AsanStackTrace * stack)810 void *asan_malloc(size_t size, AsanStackTrace *stack) {
811 void *ptr = (void*)Allocate(0, size, stack);
812 ASAN_NEW_HOOK(ptr, size);
813 return ptr;
814 }
815
asan_calloc(size_t nmemb,size_t size,AsanStackTrace * stack)816 void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
817 void *ptr = (void*)Allocate(0, nmemb * size, stack);
818 if (ptr)
819 REAL(memset)(ptr, 0, nmemb * size);
820 ASAN_NEW_HOOK(ptr, nmemb * size);
821 return ptr;
822 }
823
// realloc() replacement, including the C-standard special cases:
// realloc(NULL, n) behaves as malloc(n); realloc(p, 0) frees p.
void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}
836
asan_valloc(size_t size,AsanStackTrace * stack)837 void *asan_valloc(size_t size, AsanStackTrace *stack) {
838 void *ptr = (void*)Allocate(kPageSize, size, stack);
839 ASAN_NEW_HOOK(ptr, size);
840 return ptr;
841 }
842
// pvalloc() replacement: page-aligned allocation with the size rounded up
// to a whole number of pages.
void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}
853
asan_posix_memalign(void ** memptr,size_t alignment,size_t size,AsanStackTrace * stack)854 int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
855 AsanStackTrace *stack) {
856 void *ptr = Allocate(alignment, size, stack);
857 CHECK(IsAligned((uintptr_t)ptr, alignment));
858 ASAN_NEW_HOOK(ptr, size);
859 *memptr = ptr;
860 return 0;
861 }
862
// malloc_usable_size() replacement.  Aborts with a report if 'ptr' is
// non-NULL but is not a live heap pointer owned by this allocator.
size_t asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
  CHECK(stack);
  if (ptr == NULL) return 0;
  size_t usable_size = malloc_info.AllocationSize((uintptr_t)ptr);
  if (usable_size == 0) {
    Report("ERROR: AddressSanitizer attempting to call malloc_usable_size() "
           "for pointer which is not owned: %p\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  }
  return usable_size;
}
876
// Malloc-zone introspection: size of a live allocation, 0 if not ours.
size_t asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}
880
// Public wrapper over Describe() used by the error-reporting code.
void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}
884
// Acquires the global allocator lock (malloc-zone force-lock protocol).
void asan_mz_force_lock() {
  malloc_info.ForceLock();
}
888
// Releases the lock taken by asan_mz_force_lock().
void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}
892
893 // ---------------------- Fake stack-------------------- {{{1
// Zero-initializes the fake stack; it stays inactive until Init() is called.
FakeStack::FakeStack() {
  CHECK(REAL(memset) != NULL);
  REAL(memset)(this, 0, sizeof(*this));
}
898
AddrIsInSizeClass(uintptr_t addr,size_t size_class)899 bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
900 uintptr_t mem = allocated_size_classes_[size_class];
901 uintptr_t size = ClassMmapSize(size_class);
902 bool res = mem && addr >= mem && addr < mem + size;
903 return res;
904 }
905
AddrIsInFakeStack(uintptr_t addr)906 uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
907 for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
908 if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
909 }
910 return 0;
911 }
912
// We may want to compute this during compilation.
// Maps a frame allocation size to a fake-stack size class (log2-based,
// shifted down by kMinStackFrameSizeLog and clamped at 0).
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  // Debug aid: print the inputs before the CHECK below would fire.
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
927
FifoPush(FakeFrame * node)928 void FakeFrameFifo::FifoPush(FakeFrame *node) {
929 CHECK(node);
930 node->next = 0;
931 if (first_ == 0 && last_ == 0) {
932 first_ = last_ = node;
933 } else {
934 CHECK(first_);
935 CHECK(last_);
936 last_->next = node;
937 last_ = node;
938 }
939 }
940
FifoPop()941 FakeFrame *FakeFrameFifo::FifoPop() {
942 CHECK(first_ && last_ && "Exhausted fake stack");
943 FakeFrame *res = 0;
944 if (first_ == last_) {
945 res = first_;
946 first_ = last_ = 0;
947 } else {
948 res = first_;
949 first_ = first_->next;
950 }
951 return res;
952 }
953
// Marks the fake stack as live and records the stack size used to size
// each class's mmap region (see ClassMmapSize). Regions themselves are
// mapped lazily by AllocateOneSizeClass().
void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}
958
Cleanup()959 void FakeStack::Cleanup() {
960 alive_ = false;
961 for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
962 uintptr_t mem = allocated_size_classes_[i];
963 if (mem) {
964 PoisonShadow(mem, ClassMmapSize(i), 0);
965 allocated_size_classes_[i] = 0;
966 AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
967 }
968 }
969 }
970
// Size of the mmap region backing one size class. Every class
// currently gets the same amount — the stack size rounded up to a
// power of two — so |size_class| is deliberately unused.
size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}
974
AllocateOneSizeClass(size_t size_class)975 void FakeStack::AllocateOneSizeClass(size_t size_class) {
976 CHECK(ClassMmapSize(size_class) >= kPageSize);
977 uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
978 ClassMmapSize(size_class), __FUNCTION__);
979 // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
980 // asanThreadRegistry().GetCurrent()->tid(),
981 // size_class, new_mem, new_mem + ClassMmapSize(size_class),
982 // ClassMmapSize(size_class));
983 size_t i;
984 for (i = 0; i < ClassMmapSize(size_class);
985 i += ClassSize(size_class)) {
986 size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
987 }
988 CHECK(i == ClassMmapSize(size_class));
989 allocated_size_classes_[size_class] = new_mem;
990 }
991
// Allocates a fake frame of |size| bytes for a function whose real
// frame lives at |real_stack|. Returns the fake frame's address, or
// |real_stack| itself when the fake stack is not alive.
uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    // First use of this size class: map its region lazily.
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  // Garbage-collect frames whose real_stack is <= ours — presumably
  // the stack grows down, so those belong to calls that have already
  // returned without freeing their fake frames (e.g. via longjmp).
  // TODO(review): confirm the grows-down assumption against the
  // platform this targets.
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  // Unpoison the frame body so the instrumented function can use it.
  PoisonShadow(ptr, size, 0);
  return ptr;
}
1013
DeallocateFrame(FakeFrame * fake_frame)1014 void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
1015 CHECK(alive_);
1016 size_t size = fake_frame->size_minus_one + 1;
1017 size_t size_class = ComputeSizeClass(size);
1018 CHECK(allocated_size_classes_[size_class]);
1019 uintptr_t ptr = (uintptr_t)fake_frame;
1020 CHECK(AddrIsInSizeClass(ptr, size_class));
1021 CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
1022 size_classes_[size_class].FifoPush(fake_frame);
1023 }
1024
OnFree(size_t ptr,size_t size,size_t real_stack)1025 void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
1026 FakeFrame *fake_frame = (FakeFrame*)ptr;
1027 CHECK(fake_frame->magic = kRetiredStackFrameMagic);
1028 CHECK(fake_frame->descr != 0);
1029 CHECK(fake_frame->size_minus_one == size - 1);
1030 PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
1031 }
1032
1033 } // namespace __asan
1034
1035 // ---------------------- Interface ---------------- {{{1
1036 using namespace __asan; // NOLINT
1037
__asan_stack_malloc(size_t size,size_t real_stack)1038 size_t __asan_stack_malloc(size_t size, size_t real_stack) {
1039 if (!FLAG_use_fake_stack) return real_stack;
1040 AsanThread *t = asanThreadRegistry().GetCurrent();
1041 if (!t) {
1042 // TSD is gone, use the real stack.
1043 return real_stack;
1044 }
1045 size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
1046 // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
1047 return ptr;
1048 }
1049
__asan_stack_free(size_t ptr,size_t size,size_t real_stack)1050 void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
1051 if (!FLAG_use_fake_stack) return;
1052 if (ptr != real_stack) {
1053 FakeStack::OnFree(ptr, size, real_stack);
1054 }
1055 }
1056
1057 // ASan allocator doesn't reserve extra bytes, so normally we would
1058 // just return "size".
__asan_get_estimated_allocated_size(size_t size)1059 size_t __asan_get_estimated_allocated_size(size_t size) {
1060 if (size == 0) return 1;
1061 return Min(size, kMaxAllowedMallocSize);
1062 }
1063
// Returns true iff |p| points to memory owned by this allocator;
// AllocationSize() yields 0 for unknown or already-freed pointers.
bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uintptr_t)p) > 0;
}
1067
__asan_get_allocated_size(const void * p)1068 size_t __asan_get_allocated_size(const void *p) {
1069 if (p == NULL) return 0;
1070 size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
1071 // Die if p is not malloced or if it is already freed.
1072 if (allocated_size == 0) {
1073 Report("ERROR: AddressSanitizer attempting to call "
1074 "__asan_get_allocated_size() for pointer which is "
1075 "not owned: %p\n", p);
1076 PRINT_CURRENT_STACK();
1077 Describe((uintptr_t)p, 1);
1078 ShowStatsAndAbort();
1079 }
1080 return allocated_size;
1081 }
1082