//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

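// Header prepended to every secondary block: it links the block into the
// in-use list, records the committed region, and keeps the MemMap that owns
// the mapping. The alignment keeps the user pointer properly aligned (and on
// the memory tagging granule when the architecture supports it).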
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

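// When memory tagging is in use, block headers are accessed through a fixed
// tag (1); these helpers apply that tag to a pointer and locate the Header
// preceding a given block address.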
template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the pages it manages itself. Take
  // ownership of it before unmap() so that nothing done alongside unmap()
  // touches the now inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

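// Cache policy used when secondary caching is disabled: nothing is ever
// cached, store() unmaps the block right away and retrieve() always fails.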
class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

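// Number of pages allowed to sit unused in front of an allocation served from
// the cache; mapSecondary() uses the same bound when deciding how much of a
// block to map as memory tagged.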
static const uptr MaxUnusedCachePages = 4U;

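// Commits [CommitBase, CommitBase + CommitSize) of MemMap. With memory tagging
// enabled and a large enough region, only the leading portion (up to the
// allocation position or the unused-cache-pages bound, whichever is larger) is
// mapped MAP_MEMTAG; the rest is committed untagged.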
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                 MAP_RESIZABLE | MAP_MEMTAG | Flags);
    MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                 "scudo:secondary", MAP_RESIZABLE | Flags);
  } else {
    const uptr RemapFlags =
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
        Flags;
    MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

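// A fixed-size cache of recently freed secondary blocks. When memory tagging
// is enabled, freed blocks may first rotate through a small quarantine.
// Cached entries can also be released back to the OS after a configurable
// interval.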
template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

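  // Caches the block when it is eligible, otherwise unmaps it. With memory
  // tagging, the entry is made inaccessible (and possibly routed through the
  // quarantine) before being cached. Repeated cache overflows trigger a full
  // flush, and entries older than the release interval get their pages
  // released to the OS.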
  void store(Options Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (Config::SecondaryCacheQuarantineSize &&
          useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

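  // Looks for a cached block that can service Size bytes at the requested
  // Alignment. On success the entry is removed from the cache, its permissions
  // and memory tags are restored as needed, *H and *Zeroed are filled in, and
  // true is returned.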
  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos = 0;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        EntriesCount--;
        break;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(HeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin),
                  untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

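  // Unmaps everything still held in the quarantine and restores plain
  // (untagged) permissions on the cached entries so that they stay usable once
  // memory tagging has been turned off.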
  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].CommitBase) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
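  // Unmaps every cached entry. The mappings to release are collected under the
  // lock, and the actual unmap calls are performed outside of it.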
  void empty() {
    MemMapT MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  struct CachedBlock {
    uptr CommitBase = 0;
    uptr CommitSize = 0;
    uptr BlockBegin = 0;
    MemMapT MemMap = {};
    u64 Time = 0;
  };

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

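  // Releases to the OS the pages of all cached and quarantined entries whose
  // last-use timestamp is not newer than Time; surviving entries keep
  // OldestTime up to date.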
  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};

  CachedBlock
      Entries[Config::SecondaryCacheEntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};

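// The Secondary allocator itself: it services large allocations directly with
// the platform mapping primitives, tracks in-use blocks in a list, maintains
// statistics, and consults a configurable cache of freed blocks before mapping
// new memory.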
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

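  // Returns the first byte past the committed region of the block that holds
  // Ptr, which is the usable end of that allocation.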
  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str);

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

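  // Invokes Callback on each in-use block, passing the untagged address right
  // past the LargeBlock header (i.e. what allocate() returned). The allocator
  // must be disabled when calling this, hence the mutex assertion.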
  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

private:
  typename Config::SecondaryCache Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the scarcity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
                  Chunk::getHeaderSize(),
              PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);

  // Take ownership of the entire reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

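// Removes the block from the in-use list, updates the statistics, then hands
// the block to the cache, which either keeps it for reuse or unmaps it.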
template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_