//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"

// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
#define ASAN_ALLOCATOR_VERSION 2
#endif  // ASAN_ALLOCATOR_VERSION

namespace __asan {

enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new [].
};

static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;

void InitializeAllocator();

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() { return chunk_ != 0; }
  uptr Beg();       // First byte of user memory.
  uptr End();       // Last byte of user memory.
  uptr UsedSize();  // Size requested by the user.
  uptr AllocTid();
  uptr FreeTid();
  void GetAllocStack(StackTrace *stack);
  void GetFreeStack(StackTrace *stack);
  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
    if (addr + access_size > End()) {
      *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);
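
// Illustrative sketch (not part of this interface): an error-reporting path
// could combine FindHeapChunkByAddress() with the AddrIs*() helpers to
// describe an address relative to the nearest heap chunk. The numbers below
// assume End() == Beg() + UsedSize() and are examples only.
//
//   AsanChunkView chunk = FindHeapChunkByAddress(addr);
//   sptr offset = 0;
//   if (chunk.IsValid()) {
//     if (chunk.AddrIsInside(addr, access_size, &offset)) {
//       // e.g. Beg() == 0x1000, addr == 0x1008  =>  offset == 8
//     } else if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
//       // e.g. Beg() == 0x1000, addr == 0x0ffc  =>  offset == 4 (underflow)
//     } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
//       // e.g. End() == 0x1010, addr == 0x1014  =>  offset == 4 (overflow)
//     }
//   }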

// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};

struct AsanThreadLocalMallocStorage {
  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
#if ASAN_ALLOCATOR_VERSION == 1
      : quarantine_(x)
#endif
      { }
  AsanThreadLocalMallocStorage() {
    CHECK(REAL(memset));
    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
  }

#if ASAN_ALLOCATOR_VERSION == 1
  AsanChunkFifoList quarantine_;
  AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
  uptr quarantine_cache[16];
  uptr allocator2_cache[96 * (512 * 8 + 16)];  // Opaque.
#endif
  void CommitBack();
};

// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
  uptr magic;  // Modified by the instrumented code.
  uptr descr;  // Modified by the instrumented code.
  FakeFrame *next;
  u64 real_stack     : 48;
  u64 size_minus_one : 16;
};

struct FakeFrameFifo {
 public:
  void FifoPush(FakeFrame *node);
  FakeFrame *FifoPop();
 private:
  FakeFrame *first_, *last_;
};

class FakeFrameLifo {
 public:
  void LifoPush(FakeFrame *node) {
    node->next = top_;
    top_ = node;
  }
  void LifoPop() {
    CHECK(top_);
    top_ = top_->next;
  }
  FakeFrame *top() { return top_; }
 private:
  FakeFrame *top_;
};

// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so when a function exits the fake frame is
// not popped but remains there for quite some time until it gets used again.
// We poison the objects on the fake stack when the function returns, which
// helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a lifo list of all current fake frames and update it on
// every call to __asan_stack_malloc.
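//
// Illustrative sketch (not the exact instrumentation; the identifiers
// fake_stack, frame_size and real_frame_addr are hypothetical): per thread,
// the compiler-emitted calls conceptually reduce to
//
//   // On function entry:
//   uptr fake_frame = fake_stack.AllocateStack(frame_size, real_frame_addr);
//   // ... locals (and their redzones) are laid out inside the fake frame ...
//   // On function exit, when __asan_stack_free does run:
//   FakeStack::OnFree(fake_frame, frame_size, real_frame_addr);
//
// Frames whose exit was never reported can be unwound later via the lifo
// list (call_stack_) on subsequent calls to __asan_stack_malloc.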
class FakeStack {
 public:
  FakeStack();
  explicit FakeStack(LinkerInitialized) {}
  void Init(uptr stack_size);
  void StopUsingFakeStack() { alive_ = false; }
  void Cleanup();
  uptr AllocateStack(uptr size, uptr real_stack);
  static void OnFree(uptr ptr, uptr size, uptr real_stack);
  // Return the bottom of the mapped region.
  uptr AddrIsInFakeStack(uptr addr);
  uptr StackSize() { return stack_size_; }

 private:
  static const uptr kMinStackFrameSizeLog = 9;   // Min frame is 512B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kNumberOfSizeClasses =
      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;

  bool AddrIsInSizeClass(uptr addr, uptr size_class);

  // Each size class should be large enough to hold all frames.
  uptr ClassMmapSize(uptr size_class);

  uptr ClassSize(uptr size_class) {
    return 1UL << (size_class + kMinStackFrameSizeLog);
  }

  void DeallocateFrame(FakeFrame *fake_frame);

  uptr ComputeSizeClass(uptr alloc_size);
  void AllocateOneSizeClass(uptr size_class);

  uptr stack_size_;
  bool alive_;

  uptr allocated_size_classes_[kNumberOfSizeClasses];
  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
  FakeFrameLifo call_stack_;
};

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *asan_realloc(void *p, uptr size, StackTrace *stack);
void *asan_valloc(uptr size, StackTrace *stack);
void *asan_pvalloc(uptr size, StackTrace *stack);

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H