//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/optional/segv_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

// RHEL defines the PRIu64 format macro (for printing uint64_t's) only when
// this macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton instance of this class. Set during init(), this
// allows the signal handler to find the allocator in order to deduce the root
// cause of failures. Must not be referenced by users outside this translation
// unit, in order to avoid the init-order fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

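// Rounds Size up to the nearest multiple of Boundary. Boundary must be a
// power of two (asserted for the page size in init()); for example,
// roundUpTo(5, 4) == 8 and roundUpTo(8, 4) == 8.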
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

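// Returns the address of the page containing Ptr by masking off the low bits.
// For example, with a 4 KiB page size, getPageAddr(0x12345, 0x1000) ==
// 0x12000.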
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return from init() here if GWP-ASan is not available. This will
  // stop heap-allocation of class members, as well as mmap() of the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

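  // The pool is laid out as (MaxSimultaneousAllocations + 1) guard pages
  // interleaved with MaxSimultaneousAllocations maximally-sized slots:
  // [guard][slot 0][guard][slot 1]...[slot N-1][guard].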
  size_t PoolBytesRequired =
      PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free pages queue.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;
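  // For example, with SampleRate == 1000, the counter below is drawn uniformly
  // from [1, 2000], giving a mean sampling period of ~1000 allocations.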

  initPRNG();
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  // Protect against recursion.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
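  // Right-aligned allocations abut the following guard page, so buffer
  // overflows trap immediately; left-aligned allocations catch underflows via
  // the preceding guard page instead.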
  // Should we right-align this allocation?
  if (getRandomUnsigned32() % 2 == 0) {
    AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
    if (PerfectlyRightAlign)
      Align = AlignmentStrategy::PERFECT;
    Ptr +=
        State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
  }
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
                        roundUpTo(Size, PageSize));

  Meta->RecordAllocation(Ptr, Size);
  Meta->AllocationTrace.RecordBacktrace(Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

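// Freezes the allocator, e.g. before a crash report is produced: set the
// recursive guard so this thread cannot re-enter the allocator, and
// best-effort lock the pool (tryLock, since the failing thread may already
// hold PoolMutex).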
void GuardedPoolAllocator::stop() {
  getThreadLocals()->RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive deallocateInGuardedPool() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

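  // Pick a random free slot, then fill the hole with the last entry so the
  // array stays dense (O(1) unordered removal).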
  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

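// A per-thread Marsaglia xorshift32 generator (shift triple 13/17/5): fast
// and dependency-free, though not cryptographically secure.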
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan