//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }
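
// Returns `Ptr` with the fixed header tag (1) applied when the allocator
// supports memory tagging; returns `Ptr` unchanged otherwise.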
template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the pages it manages. Take over
  // ownership of the mapping before unmap() so that operations following
  // unmap() do not touch the now-inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {
struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};
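
// When a cached block is retrieved, the allocation is required to start within
// MaxUnusedCachePages pages of the commit base; with memory tagging enabled,
// mapSecondary() tags at least that much of the commit region up front so that
// such reuse does not hand out untagged pages.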
static const uptr MaxUnusedCachePages = 4U;
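
// Commits [CommitBase, CommitBase + CommitSize) within MemMap, returning false
// on failure (MAP_ALLOWNOMEM is always set). With memory tagging enabled, only
// a prefix of the region is remapped with MAP_MEMTAG: it extends to the
// allocation position or to MaxUnusedCachePages past the commit base,
// whichever is further; the remainder stays untagged.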
template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
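
// Caches recently released secondary blocks for reuse, up to
// Config::getEntriesArraySize() of them. Cached entries may be released back
// to the OS after a configurable interval, and a small quarantine is kept in
// front of the cache when memory tagging is enabled.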
template <typename Config> class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %d, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // A default interval specified in the cache config takes precedence over
    // the one passed in.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
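
  // Stores the block described by `H` in the cache, routing it through the
  // quarantine first when memory tagging is enabled. Depending on the release
  // interval, the pages are released to the OS and/or made inaccessible before
  // being cached. If the block cannot be cached, it is unmapped.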
  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }
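
  // Looks for a cached block that can hold `Size` bytes at `Alignment`. The
  // best fit among the valid entries is returned (a block is taken immediately
  // when its wasted space is within 10% of the usable size). Returns false if
  // no suitable entry is found.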
  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Otherwise, keep track of the smallest cached block that is still
        // large enough for (Size + HeadersSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::getEntriesArraySize())
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
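  // Drops every cached entry: the mappings are collected under the lock, then
  // unmapped once the lock has been released.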
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }
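
  // Releases the pages of `Entry` back to the OS if it was cached at or before
  // `Time`; otherwise updates OldestTime so a later pass can pick it up.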
  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }
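
  // Walks the quarantine and the cache, releasing every entry that was cached
  // at or before `Time`. Does nothing if no entry is old enough.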
  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};
};
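
// The Secondary allocator itself: tracks in-use blocks, maintains the
// associated statistics, and routes frees through the configured cache.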
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the scarcity of address space on those platforms, requesting an
// allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) to do the
  // cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (i.e.,
    // the pointer returned by the C or C++ allocation APIs) ends up on a page
    // boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
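
// Removes the block from the in-use list, updates the statistics, and hands
// the block to the cache, which either keeps it for reuse or unmaps it.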
template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_