//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;

enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

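// A chunk is considered live if it has metadata with a recorded allocation
// stack id and a non-zero requested size.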
bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id &&
         metadata_->get_requested_size();
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->get_requested_size());
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->get_requested_size();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

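// Initializes the allocator and fills tail_magic with random bytes; they are
// later compared against the unused tail of the last granule on free to
// detect writes past the requested size.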
void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

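// Rounds the requested size up to a whole number of tag granules
// (kShadowAlignment bytes); zero-byte requests are treated as one byte.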
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

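// Allocates a granule-aligned block, records the requested size and the
// allocation stack in the metadata, copies tail_magic into the unused tail of
// the last granule (all but its final byte), and, when tagging is enabled,
// tags the memory.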
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->set_requested_size(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
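      // The last granule is only partially used: its shadow stores the number
      // of valid bytes instead of the tag, and the real tag is kept in the
      // last byte of the granule itself (the short-granule encoding).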
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

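// Returns true if the tag embedded in the pointer matches the memory tag of
// the granule it points into, allowing for short-granule tags.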
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

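// Frees a chunk: verifies that the pointer and memory tags still match, checks
// the tail magic for writes past the requested size, clears the metadata,
// optionally fills and retags the freed range, and records the free in the
// thread's heap allocation ring buffer.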
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->get_requested_size();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->set_requested_size(0);
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

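// Realloc is implemented as allocate-copy-free: a new chunk is always
// allocated, Min(new_size, old requested size) bytes are copied over, and the
// old chunk is deallocated.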
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->get_requested_size())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

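// calloc: rejects nmemb * size overflow, then performs a zero-initialized
// allocation.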
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

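// Returns a view of the chunk containing 'address', or an empty view if the
// address does not belong to a heap allocation.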
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

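// Returns the requested size of the chunk if 'tagged_ptr' points to the user
// beginning of a live allocation, and 0 otherwise.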
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->get_requested_size();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

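// realloc(nullptr, size) behaves like malloc(size); realloc(ptr, 0) frees the
// chunk and returns nullptr.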
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *(void **)UntagPtr(memptr) = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

using namespace __hwasan;

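// Allocator tagging can be toggled at runtime through these interface
// functions.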
void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

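// Free and unmapped byte counts are not tracked by this allocator; placeholder
// values are returned.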
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }