//===-- msan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"

namespace __msan {

struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
    if (__msan_get_track_origins())
      FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
  }
};
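
// MEM_TO_SHADOW and MEM_TO_ORIGIN (from msan.h) map an application address to
// its shadow and origin counterparts; releasing those ranges when user memory
// is unmapped keeps shadow and origin RSS proportional to live allocations.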

static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize   = 0x80000000000;  // 8T.
static const uptr kMetadataSize  = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                             DefaultSizeClassMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;
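
// Small requests are served from the size-classed primary region at
// kAllocatorSpace through per-thread AllocatorCache instances; larger ones
// fall back to the mmap-based secondary allocator. In both cases the chunk's
// kMetadataSize metadata slot holds the Metadata struct recording the size
// the user actually requested.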

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

static int inited = 0;

static inline void Init() {
  if (inited) return;
  __msan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  Init();
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
           (void *)size);
    return AllocatorReturnNull();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment, false);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      u32 stack_id = StackDepotPut(stack->trace, stack->size);
      CHECK(stack_id);
      u32 id;
      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
      __msan_set_origin(allocated, size, Origin(id, 1).raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}
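
// In the path above: zeroise (the calloc-style case) zero-fills and unpoisons
// the chunk, while poison_in_malloc marks it uninitialized; with origin
// tracking the allocation stack is interned in the StackDepot, chained via
// ChainedOriginDepotPut under Origin::kHeapRoot, and written into the origin
// shadow so later uninitialized-use reports can name the malloc site.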

void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  Init();
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      u32 stack_id = StackDepotPut(stack->trace, stack->size);
      CHECK(stack_id);
      u32 id;
      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
      __msan_set_origin(p, size, Origin(id, 1).raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
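
// With poison_in_free the freed chunk stays poisoned and, under origin
// tracking, its origin is switched to the free stack, so a later read of
// freed heap memory is reported as a use of uninitialized value whose origin
// chain points at the free site.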

void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment, bool zeroise) {
  if (!old_p)
    return MsanAllocate(stack, new_size, alignment, zeroise);
  if (!new_size) {
    MsanDeallocate(stack, old_p);
    return 0;
  }
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size)
      __msan_poison((char*)old_p + old_size, new_size - old_size);
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
  // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
  if (new_p) {
    __msan_memcpy(new_p, old_p, memcpy_size);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
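
// Reallocation resizes in place when the new size still fits in the chunk's
// actually-allocated size (poisoning only the newly exposed tail); otherwise
// a new chunk is allocated and __msan_memcpy copies the payload along with
// its shadow and origins, so initialized-ness survives the move.
// A hypothetical external call, as a sketch only (the real interceptors live
// in other MSan source files):
//
//   GET_MALLOC_STACK_TRACE;  // stack-capture macro from msan.h
//   void *p = MsanReallocate(&stack, /*old_p*/ 0, size, sizeof(u64), false);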

static uptr AllocationSize(const void *p) {
  if (p == 0) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}
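
// AllocationSize returns 0 both for pointers the allocator does not own and
// for interior pointers (anything other than the exact chunk start); the
// ownership and allocated-size queries exported below are built on it.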

}  // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}
uptr __msan_get_current_allocated_bytes() {
  return __sanitizer_get_current_allocated_bytes();
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
uptr __msan_get_heap_size() {
  return __sanitizer_get_heap_size();
}

uptr __sanitizer_get_free_bytes() { return 1; }
uptr __msan_get_free_bytes() {
  return __sanitizer_get_free_bytes();
}

uptr __sanitizer_get_unmapped_bytes() { return 1; }
uptr __msan_get_unmapped_bytes() {
  return __sanitizer_get_unmapped_bytes();
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
uptr __msan_get_estimated_allocated_size(uptr size) {
  return __sanitizer_get_estimated_allocated_size(size);
}

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
int __msan_get_ownership(const void *p) {
  return __sanitizer_get_ownership(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
uptr __msan_get_allocated_size(const void *p) {
  return __sanitizer_get_allocated_size(p);
}