//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "common.h"
#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
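//
// A rough picture of the mapping backing a block, as laid out by allocate()
// below in the common (unaligned) case; widths are not to scale:
//
//   MapBase    CommitBase                              BlockEnd   MapEnd
//   |guard page|Header|user block .....................|guard page|
//
// The pointer returned to the user is CommitBase + LargeBlock::getHeaderSize(),
// and LargeBlock::getHeader() steps back by that same amount to recover the
// header.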

namespace LargeBlock {

struct Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr BlockEnd;
  uptr MapBase;
  uptr MapSize;
  MapPlatformData Data;
};

constexpr uptr getHeaderSize() {
  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}

static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

class MapAllocatorNoCache {
public:
  void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H) {
    return false;
  }
  bool store(UNUSED LargeBlock::Header *H) { return false; }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};
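
// The MapAllocatorCache keeps the mappings of recently deallocated Secondary
// blocks alive (up to EntriesArraySize of them, further bounded at runtime by
// MaxEntriesCount and MaxEntrySize) so that a subsequent allocation of a
// compatible size can reuse them instead of going back to the kernel. Entries
// older than ReleaseToOsIntervalMs get their pages released back to the OS,
// and the cache is emptied entirely if it keeps being found full.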

template <u32 EntriesArraySize = 32U, u32 DefaultMaxEntriesCount = 32U,
          uptr DefaultMaxEntrySize = 1UL << 19,
          s32 MinReleaseToOsIntervalMs = INT32_MIN,
          s32 MaxReleaseToOsIntervalMs = INT32_MAX>
class MapAllocatorCache {
public:
  // Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length
  // arrays are an extension for some compilers.
  // FIXME(kostyak): support (partially) the cache on Fuchsia.
  static_assert(!SCUDO_FUCHSIA || EntriesArraySize == 0U, "");

  // Ensure the default maximum specified fits the array.
  static_assert(DefaultMaxEntriesCount <= EntriesArraySize, "");

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  bool store(LargeBlock::Header *H) {
    bool EntryCached = false;
    bool EmptyCache = false;
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load(&MaxEntriesCount, memory_order_relaxed);
    {
      ScopedLock L(Mutex);
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        // Cache the new block in Entries[0], relocating the previous occupant
        // of that slot to the first free entry, so that Entries[0] always
        // holds the most recently cached block.
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].Block)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0].Block = reinterpret_cast<uptr>(H);
          Entries[0].BlockEnd = H->BlockEnd;
          Entries[0].MapBase = H->MapBase;
          Entries[0].MapSize = H->MapSize;
          Entries[0].Data = H->Data;
          Entries[0].Time = Time;
          EntriesCount++;
          EntryCached = true;
          break;
        }
      }
    }
    s32 Interval;
    if (EmptyCache)
      empty();
    else if ((Interval = atomic_load(&ReleaseToOsIntervalMs,
                                     memory_order_relaxed)) >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    return EntryCached;
  }

  bool retrieve(uptr Size, LargeBlock::Header **H) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load(&MaxEntriesCount, memory_order_relaxed);
    ScopedLock L(Mutex);
    if (EntriesCount == 0)
      return false;
    for (u32 I = 0; I < MaxCount; I++) {
      if (!Entries[I].Block)
        continue;
      // Only reuse a cached block if it is large enough for the requested
      // size, but not so much larger that more than 4 pages would go to waste.
      const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
      if (Size > BlockSize)
        continue;
      if (Size < BlockSize - PageSize * 4U)
        continue;
      *H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
      Entries[I].Block = 0;
      (*H)->BlockEnd = Entries[I].BlockEnd;
      (*H)->MapBase = Entries[I].MapBase;
      (*H)->MapSize = Entries[I].MapSize;
      (*H)->Data = Entries[I].Data;
      EntriesCount--;
      return true;
    }
    return false;
  }

  bool canCache(uptr Size) {
    return atomic_load(&MaxEntriesCount, memory_order_relaxed) != 0U &&
           Size <= atomic_load(&MaxEntrySize, memory_order_relaxed);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value), MaxReleaseToOsIntervalMs),
              MinReleaseToOsIntervalMs);
      atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
      return true;
    } else if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > EntriesArraySize)
        return false;
      atomic_store(&MaxEntriesCount, MaxCount, memory_order_relaxed);
      return true;
    } else if (O == Option::MaxCacheEntrySize) {
      atomic_store(&MaxEntrySize, static_cast<uptr>(Value),
                   memory_order_relaxed);
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

private:
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < EntriesArraySize; I++) {
        if (!Entries[I].Block)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].Block = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount)
      return;
    for (uptr I = 0; I < EntriesArraySize; I++) {
      if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
        continue;
      releasePagesToOS(Entries[I].Block, 0,
                       Entries[I].BlockEnd - Entries[I].Block,
                       &Entries[I].Data);
      Entries[I].Time = 0;
    }
  }

  struct CachedBlock {
    uptr Block;
    uptr BlockEnd;
    uptr MapBase;
    uptr MapSize;
    MapPlatformData Data;
    u64 Time;
  };

  HybridMutex Mutex;
  CachedBlock Entries[EntriesArraySize];
  u32 EntriesCount;
  atomic_u32 MaxEntriesCount;
  atomic_uptr MaxEntrySize;
  uptr LargestSize;
  u32 IsFullEvents;
  atomic_s32 ReleaseToOsIntervalMs;
};

template <class CacheT> class MapAllocator {
public:
  void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    Cache.initLinkerInitialized(ReleaseToOsInterval);
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
  }
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, ReleaseToOsInterval);
  }

  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
                 bool ZeroContents = false);

  void deallocate(void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    return LargeBlock::getHeader(Ptr)->BlockEnd;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks)
      Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
  }

  uptr canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

private:
  CacheT Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  LocalStats Stats;
};
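
// A minimal usage sketch (hypothetical caller code, not part of this file):
// the Combined allocator is the intended frontend, but the Secondary can be
// driven directly along these lines:
//
//   MapAllocator<MapAllocatorCache<>> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   void *P = Secondary.allocate(1UL << 20); // Served by map(), with guards.
//   Secondary.deallocate(P);                 // May end up in the cache.
//   Secondary.releaseToOS();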

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
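// As an illustration (example figures only, assuming a 4KB page size on a
// 32-bit platform): a request with Size = 68KB and AlignmentHint = 64KB
// initially reserves roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize)
// plus two guard pages of VA, but after the trimming below only
// PageSize + roundUpTo(Size - AlignmentHint, PageSize) = 8KB ends up
// committed.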
template <class CacheT>
void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
                                     uptr *BlockEnd, bool ZeroContents) {
  DCHECK_GE(Size, AlignmentHint);
  const uptr PageSize = getPageSizeCached();
  const uptr RoundedSize =
      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);

  if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    if (Cache.retrieve(RoundedSize, &H)) {
      if (BlockEnd)
        *BlockEnd = H->BlockEnd;
      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
                                           LargeBlock::getHeaderSize());
      if (ZeroContents)
        memset(Ptr, 0, H->BlockEnd - reinterpret_cast<uptr>(Ptr));
      const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase =
      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(AlignmentHint >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd = CommitBase + PageSize +
                           roundUpTo((Size - AlignmentHint), PageSize) +
                           PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr Ptr =
      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
                                 CommitSize, "scudo:secondary", 0, &Data));
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->BlockEnd = CommitBase + CommitSize;
  H->Data = Data;
  if (BlockEnd)
    *BlockEnd = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}

template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
  const uptr Block = reinterpret_cast<uptr>(H);
  const uptr CommitSize = H->BlockEnd - Block;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  if (Cache.canCache(CommitSize) && Cache.store(H))
    return;
  void *Addr = reinterpret_cast<void *>(H->MapBase);
  const uptr Size = H->MapSize;
  MapPlatformData Data = H->Data;
  unmap(Addr, Size, UNMAP_ALL, &Data);
}

template <class CacheT>
void MapAllocator<CacheT>::getStats(ScopedString *Str) const {
  Str->append(
      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
      "(%zuK), remains %zu (%zuK) max %zuM\n",
      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
      LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_