//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "common.h"
#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
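//
// A rough usage sketch (illustrative only; `Secondary`, `NeededSize` and
// `AlignmentHint` are placeholders for what the combined allocator passes in):
//
//   uptr BlockEnd;
//   void *Ptr = Secondary.allocate(NeededSize, AlignmentHint, &BlockEnd,
//                                  ZeroFill);
//   ...
//   Secondary.deallocate(Ptr);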

namespace LargeBlock {

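// Each secondary block is preceded by this header, which sits right before
// the pointer handed back to the user. MapBase/MapSize describe the entire
// reserved mapping (guard pages included), while BlockEnd is the first byte
// past the usable, committed region.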
struct Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr BlockEnd;
  uptr MapBase;
  uptr MapSize;
  MapPlatformData Data;
};

constexpr uptr getHeaderSize() {
  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}

static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

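// A no-op cache, for when secondary caching is not wanted: store() and
// retrieve() always fail, so every allocation maps fresh memory and every
// deallocation unmaps it immediately.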
class MapAllocatorNoCache {
public:
  void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  bool store(UNUSED LargeBlock::Header *H) { return false; }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

template <u32 EntriesArraySize = 32U, u32 DefaultMaxEntriesCount = 32U,
          uptr DefaultMaxEntrySize = 1UL << 19,
          s32 MinReleaseToOsIntervalMs = INT32_MIN,
          s32 MaxReleaseToOsIntervalMs = INT32_MAX>
class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(DefaultMaxEntriesCount <= EntriesArraySize, "");

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

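  // store() caches the block described by H in the first free slot, moving
  // the previous occupant of slot 0 into that slot so that slot 0 always
  // holds the most recently cached block. If the cache has repeatedly been
  // found full since it was last emptied, it is emptied entirely; otherwise,
  // entries older than the release interval have their pages released back
  // to the OS.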
  bool store(LargeBlock::Header *H) {
    bool EntryCached = false;
    bool EmptyCache = false;
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    {
      ScopedLock L(Mutex);
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].Block)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0].Block = reinterpret_cast<uptr>(H);
          Entries[0].BlockEnd = H->BlockEnd;
          Entries[0].MapBase = H->MapBase;
          Entries[0].MapSize = H->MapSize;
          Entries[0].Data = H->Data;
          Entries[0].Time = Time;
          EntriesCount++;
          EntryCached = true;
          break;
        }
      }
    }
    s32 Interval;
    if (EmptyCache)
      empty();
    else if ((Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs)) >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    return EntryCached;
  }

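  // retrieve() returns the first cached block that is large enough for Size
  // without wasting more than four pages. Zeroed is set when the block's
  // pages have been released back to the OS since it was cached, and are
  // therefore already zero.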
  bool retrieve(uptr Size, LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    ScopedLock L(Mutex);
    if (EntriesCount == 0)
      return false;
    for (u32 I = 0; I < MaxCount; I++) {
      if (!Entries[I].Block)
        continue;
      const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
      if (Size > BlockSize)
        continue;
      if (Size < BlockSize - PageSize * 4U)
        continue;
      *H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
      *Zeroed = Entries[I].Time == 0;
      Entries[I].Block = 0;
      (*H)->BlockEnd = Entries[I].BlockEnd;
      (*H)->MapBase = Entries[I].MapBase;
      (*H)->MapSize = Entries[I].MapSize;
      (*H)->Data = Entries[I].Data;
      EntriesCount--;
      return true;
    }
    return false;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value), MaxReleaseToOsIntervalMs),
              MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    } else if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    } else if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

private:
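  // empty() drops every cached entry and unmaps the corresponding mappings.
  // The mapping information is copied out under the lock so that the actual
  // unmap() calls happen without holding it.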
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < EntriesArraySize; I++) {
        if (!Entries[I].Block)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].Block = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount)
      return;
    for (uptr I = 0; I < EntriesArraySize; I++) {
      if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
        continue;
      releasePagesToOS(Entries[I].Block, 0,
                       Entries[I].BlockEnd - Entries[I].Block,
                       &Entries[I].Data);
      Entries[I].Time = 0;
    }
  }

  struct CachedBlock {
    uptr Block;
    uptr BlockEnd;
    uptr MapBase;
    uptr MapSize;
    MapPlatformData Data;
    u64 Time;
  };

  HybridMutex Mutex;
  CachedBlock Entries[EntriesArraySize];
  u32 EntriesCount;
  atomic_u32 MaxEntriesCount;
  atomic_uptr MaxEntrySize;
  uptr LargestSize;
  u32 IsFullEvents;
  atomic_s32 ReleaseToOsIntervalMs;
};

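// The secondary allocator proper. Blocks in use are tracked in a doubly
// linked list so that iterateOverBlocks() can visit them; CacheT (either
// MapAllocatorCache or MapAllocatorNoCache) decides whether freed mappings
// get recycled or returned to the OS right away.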
template <class CacheT> class MapAllocator {
public:
  void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    Cache.initLinkerInitialized(ReleaseToOsInterval);
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
  }
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, ReleaseToOsInterval);
  }

  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    return LargeBlock::getHeader(Ptr)->BlockEnd;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks)
      Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
  }

  uptr canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

private:
  CacheT Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
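// For instance (illustrative figures, assuming a 4 KiB page size): with
// Size = 192 KiB and AlignmentHint = 64 KiB, the reservation below covers
// roundUpTo(Size + getHeaderSize(), PageSize) plus two guard pages, while the
// committed portion, once CommitBase has been aligned, comes out to roughly
// Size - AlignmentHint = 128 KiB plus a page for the header.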
template <class CacheT>
void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
                                     uptr *BlockEnd,
                                     FillContentsMode FillContents) {
  DCHECK_GE(Size, AlignmentHint);
  const uptr PageSize = getPageSizeCached();
  const uptr RoundedSize =
      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);

  if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(RoundedSize, &H, &Zeroed)) {
      if (BlockEnd)
        *BlockEnd = H->BlockEnd;
      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
                                           LargeBlock::getHeaderSize());
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               H->BlockEnd - reinterpret_cast<uptr>(Ptr));
      const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase =
      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(AlignmentHint >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd = CommitBase + PageSize +
                           roundUpTo((Size - AlignmentHint), PageSize) +
                           PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr Ptr = reinterpret_cast<uptr>(
      map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
          MAP_RESIZABLE, &Data));
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->BlockEnd = CommitBase + CommitSize;
  H->Data = Data;
  if (BlockEnd)
    *BlockEnd = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}

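// deallocate() first offers the block back to the cache; if the cache cannot
// take it (caching disabled, cache full, or the block too large), the whole
// mapping, guard pages included, is unmapped.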
template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
  const uptr Block = reinterpret_cast<uptr>(H);
  const uptr CommitSize = H->BlockEnd - Block;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  if (Cache.canCache(CommitSize) && Cache.store(H))
    return;
  void *Addr = reinterpret_cast<void *>(H->MapBase);
  const uptr Size = H->MapSize;
  MapPlatformData Data = H->Data;
  unmap(Addr, Size, UNMAP_ALL, &Data);
}

template <class CacheT>
void MapAllocator<CacheT>::getStats(ScopedString *Str) const {
  Str->append(
      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
      "(%zuK), remains %zu (%zuK) max %zuM\n",
      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
      LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_