1 //===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef SANITIZER_ALLOCATOR_H
15 #define SANITIZER_ALLOCATOR_H
16 
17 #include "sanitizer_internal_defs.h"
18 #include "sanitizer_common.h"
19 #include "sanitizer_libc.h"
20 #include "sanitizer_list.h"
21 #include "sanitizer_mutex.h"
22 #include "sanitizer_lfstack.h"
23 
24 namespace __sanitizer {
25 
26 // Prints error message and kills the program.
27 void NORETURN ReportAllocatorCannotReturnNull();
28 
29 // SizeClassMap maps allocation sizes into size classes and back.
30 // Class 0 corresponds to size 0.
31 // Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
32 // Next 4 classes: 256 + i * 64  (i = 1 to 4).
33 // Next 4 classes: 512 + i * 128 (i = 1 to 4).
34 // ...
35 // Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
36 // Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
37 //
38 // This structure of the size class map gives us:
39 //   - Efficient table-free class-to-size and size-to-class functions.
40 //   - The difference between two consecutive size classes is between 14% and 25%.
41 //
42 // This class also gives a hint to a thread-caching allocator about the number
43 // of chunks that need to be cached per thread:
44 //  - kMaxNumCached is the maximal number of chunks per size class.
45 //  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
46 //
47 // Part of output of SizeClassMap::Print():
48 // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
49 // c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
50 // c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
51 // c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
52 // c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
53 // c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
54 // c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
55 // c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
56 //
57 // c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
58 // c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
59 // c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
60 // c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
61 // c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
62 // c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
63 // c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
64 // c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
65 //
66 // c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
67 // c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
68 // c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
69 // c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
70 //
71 // c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
72 // c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
73 // c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
74 // c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
75 //
76 // c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
77 // c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
78 // c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
79 // c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
80 //
81 // ...
82 //
83 // c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
84 // c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
85 // c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
86 // c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
87 //
88 // c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
89 
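//
// A minimal usage sketch (illustrative; the parameters are the same as the
// DefaultSizeClassMap typedef further down in this file):
//
//   typedef SizeClassMap<17, 128, 16> Map;
//   uptr c = Map::ClassID(300);   // c == 17 (l = 8, hbits = 0, lbits = 44)
//   uptr s = Map::Size(c);        // s == 320 == 256 + 64, the c17 row above
//   uptr n = Map::MaxCached(c);   // Min(128, (1 << 16) / 320) == 128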
90 template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
91 class SizeClassMap {
92   static const uptr kMinSizeLog = 4;
93   static const uptr kMidSizeLog = kMinSizeLog + 4;
94   static const uptr kMinSize = 1 << kMinSizeLog;
95   static const uptr kMidSize = 1 << kMidSizeLog;
96   static const uptr kMidClass = kMidSize / kMinSize;
97   static const uptr S = 2;
98   static const uptr M = (1 << S) - 1;
99 
100  public:
101   static const uptr kMaxNumCached = kMaxNumCachedT;
102   // We transfer chunks between central and thread-local free lists in batches.
103   // For small size classes we allocate batches separately.
104   // For large size classes we use one of the chunks to store the batch.
105   struct TransferBatch {
106     TransferBatch *next;
107     uptr count;
108     void *batch[kMaxNumCached];
109   };
110 
111   static const uptr kMaxSize = 1UL << kMaxSizeLog;
112   static const uptr kNumClasses =
113       kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
114   COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
115   static const uptr kNumClassesRounded =
116       kNumClasses == 32  ? 32 :
117       kNumClasses <= 64  ? 64 :
118       kNumClasses <= 128 ? 128 : 256;
119 
120   static uptr Size(uptr class_id) {
121     if (class_id <= kMidClass)
122       return kMinSize * class_id;
123     class_id -= kMidClass;
124     uptr t = kMidSize << (class_id >> S);
125     return t + (t >> S) * (class_id & M);
126   }
127 
128   static uptr ClassID(uptr size) {
129     if (size <= kMidSize)
130       return (size + kMinSize - 1) >> kMinSizeLog;
131     if (size > kMaxSize) return 0;
132     uptr l = MostSignificantSetBitIndex(size);
133     uptr hbits = (size >> (l - S)) & M;
134     uptr lbits = size & ((1 << (l - S)) - 1);
135     uptr l1 = l - kMidSizeLog;
136     return kMidClass + (l1 << S) + hbits + (lbits > 0);
137   }
138 
139   static uptr MaxCached(uptr class_id) {
140     if (class_id == 0) return 0;
141     uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
142     return Max<uptr>(1, Min(kMaxNumCached, n));
143   }
144 
145   static void Print() {
146     uptr prev_s = 0;
147     uptr total_cached = 0;
148     for (uptr i = 0; i < kNumClasses; i++) {
149       uptr s = Size(i);
150       if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
151         Printf("\n");
152       uptr d = s - prev_s;
153       uptr p = prev_s ? (d * 100 / prev_s) : 0;
154       uptr l = s ? MostSignificantSetBitIndex(s) : 0;
155       uptr cached = MaxCached(i) * s;
156       Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
157              "cached: %zd %zd; id %zd\n",
158              i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
159       total_cached += cached;
160       prev_s = s;
161     }
162     Printf("Total cached: %zd\n", total_cached);
163   }
164 
165   static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
166     return Size(class_id) < sizeof(TransferBatch) -
167         sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
168   }
169 
170   static void Validate() {
171     for (uptr c = 1; c < kNumClasses; c++) {
172       // Printf("Validate: c%zd\n", c);
173       uptr s = Size(c);
174       CHECK_NE(s, 0U);
175       CHECK_EQ(ClassID(s), c);
176       if (c != kNumClasses - 1)
177         CHECK_EQ(ClassID(s + 1), c + 1);
178       CHECK_EQ(ClassID(s - 1), c);
179       if (c)
180         CHECK_GT(Size(c), Size(c-1));
181     }
182     CHECK_EQ(ClassID(kMaxSize + 1), 0);
183 
184     for (uptr s = 1; s <= kMaxSize; s++) {
185       uptr c = ClassID(s);
186       // Printf("s%zd => c%zd\n", s, c);
187       CHECK_LT(c, kNumClasses);
188       CHECK_GE(Size(c), s);
189       if (c > 0)
190         CHECK_LT(Size(c-1), s);
191     }
192   }
193 };
194 
195 typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
196 typedef SizeClassMap<17, 64,  14> CompactSizeClassMap;
197 template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
198 
199 // Memory allocator statistics
200 enum AllocatorStat {
201   AllocatorStatAllocated,
202   AllocatorStatMapped,
203   AllocatorStatCount
204 };
205 
206 typedef uptr AllocatorStatCounters[AllocatorStatCount];
207 
208 // Per-thread stats, live in per-thread cache.
209 class AllocatorStats {
210  public:
211   void Init() {
212     internal_memset(this, 0, sizeof(*this));
213   }
214   void InitLinkerInitialized() {}
215 
216   void Add(AllocatorStat i, uptr v) {
217     v += atomic_load(&stats_[i], memory_order_relaxed);
218     atomic_store(&stats_[i], v, memory_order_relaxed);
219   }
220 
221   void Sub(AllocatorStat i, uptr v) {
222     v = atomic_load(&stats_[i], memory_order_relaxed) - v;
223     atomic_store(&stats_[i], v, memory_order_relaxed);
224   }
225 
226   void Set(AllocatorStat i, uptr v) {
227     atomic_store(&stats_[i], v, memory_order_relaxed);
228   }
229 
230   uptr Get(AllocatorStat i) const {
231     return atomic_load(&stats_[i], memory_order_relaxed);
232   }
233 
234  private:
235   friend class AllocatorGlobalStats;
236   AllocatorStats *next_;
237   AllocatorStats *prev_;
238   atomic_uintptr_t stats_[AllocatorStatCount];
239 };
240 
241 // Global stats, used for aggregation and querying.
242 class AllocatorGlobalStats : public AllocatorStats {
243  public:
244   void InitLinkerInitialized() {
245     next_ = this;
246     prev_ = this;
247   }
248   void Init() {
249     internal_memset(this, 0, sizeof(*this));
250     InitLinkerInitialized();
251   }
252 
253   void Register(AllocatorStats *s) {
254     SpinMutexLock l(&mu_);
255     s->next_ = next_;
256     s->prev_ = this;
257     next_->prev_ = s;
258     next_ = s;
259   }
260 
261   void Unregister(AllocatorStats *s) {
262     SpinMutexLock l(&mu_);
263     s->prev_->next_ = s->next_;
264     s->next_->prev_ = s->prev_;
265     for (int i = 0; i < AllocatorStatCount; i++)
266       Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
267   }
268 
269   void Get(AllocatorStatCounters s) const {
270     internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
271     SpinMutexLock l(&mu_);
272     const AllocatorStats *stats = this;
273     for (;;) {
274       for (int i = 0; i < AllocatorStatCount; i++)
275         s[i] += stats->Get(AllocatorStat(i));
276       stats = stats->next_;
277       if (stats == this)
278         break;
279     }
280     // All stats must be non-negative.
281     for (int i = 0; i < AllocatorStatCount; i++)
282       s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
283   }
284 
285  private:
286   mutable SpinMutex mu_;
287 };
288 
289 // Allocators call these callbacks on mmap/munmap.
290 struct NoOpMapUnmapCallback {
291   void OnMap(uptr p, uptr size) const { }
292   void OnUnmap(uptr p, uptr size) const { }
293 };
294 
295 // Callback type for iterating over chunks.
296 typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
297 
298 // SizeClassAllocator64 -- allocator for 64-bit address space.
299 //
300 // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
301 // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
302 // Otherwise SpaceBeg=kSpaceBeg (fixed address).
303 // kSpaceSize is a power of two.
304 // At the beginning the entire space is mprotect-ed, then small parts of it
305 // are mapped on demand.
306 //
307 // Region: a part of Space dedicated to a single size class.
308 // There are kNumClasses Regions of equal size.
309 //
310 // UserChunk: a piece of memory returned to user.
311 // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
312 //
313 // A Region looks like this:
314 // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
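//
// As a concrete illustration (hypothetical parameters): with kSpaceSize == 2^40
// and SizeClassMap::kNumClassesRounded == 64, each Region is kRegionSize == 2^34
// bytes, and a pointer p maps to
//   class_id  == ((p - SpaceBeg()) / kRegionSize) % kNumClassesRounded
//   chunk_idx == (p % kRegionSize) / Size(class_id)
// with its MetaChunk starting at
//   SpaceBeg() + kRegionSize * (class_id + 1) - (chunk_idx + 1) * kMetadataSize.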
315 template <const uptr kSpaceBeg, const uptr kSpaceSize,
316           const uptr kMetadataSize, class SizeClassMap,
317           class MapUnmapCallback = NoOpMapUnmapCallback>
318 class SizeClassAllocator64 {
319  public:
320   typedef typename SizeClassMap::TransferBatch Batch;
321   typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
322       SizeClassMap, MapUnmapCallback> ThisT;
323   typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
324 
325   void Init() {
326     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
327     if (kUsingConstantSpaceBeg) {
328       CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
329                               MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
330     } else {
331       NonConstSpaceBeg =
332           reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
333       CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
334     }
335     MapWithCallback(SpaceEnd(), AdditionalSize());
336   }
337 
338   void MapWithCallback(uptr beg, uptr size) {
339     CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
340     MapUnmapCallback().OnMap(beg, size);
341   }
342 
343   void UnmapWithCallback(uptr beg, uptr size) {
344     MapUnmapCallback().OnUnmap(beg, size);
345     UnmapOrDie(reinterpret_cast<void *>(beg), size);
346   }
347 
348   static bool CanAllocate(uptr size, uptr alignment) {
349     return size <= SizeClassMap::kMaxSize &&
350       alignment <= SizeClassMap::kMaxSize;
351   }
352 
353   NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
354                                 uptr class_id) {
355     CHECK_LT(class_id, kNumClasses);
356     RegionInfo *region = GetRegionInfo(class_id);
357     Batch *b = region->free_list.Pop();
358     if (!b)
359       b = PopulateFreeList(stat, c, class_id, region);
360     region->n_allocated += b->count;
361     return b;
362   }
363 
364   NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
365     RegionInfo *region = GetRegionInfo(class_id);
366     CHECK_GT(b->count, 0);
367     region->free_list.Push(b);
368     region->n_freed += b->count;
369   }
370 
371   bool PointerIsMine(const void *p) {
372     uptr P = reinterpret_cast<uptr>(p);
373     if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
374       return P / kSpaceSize == kSpaceBeg / kSpaceSize;
375     return P >= SpaceBeg() && P < SpaceEnd();
376   }
377 
378   uptr GetSizeClass(const void *p) {
379     if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
380       return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
381     return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
382            kNumClassesRounded;
383   }
384 
385   void *GetBlockBegin(const void *p) {
386     uptr class_id = GetSizeClass(p);
387     uptr size = SizeClassMap::Size(class_id);
388     if (!size) return nullptr;
389     uptr chunk_idx = GetChunkIdx((uptr)p, size);
390     uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
391     uptr beg = chunk_idx * size;
392     uptr next_beg = beg + size;
393     if (class_id >= kNumClasses) return nullptr;
394     RegionInfo *region = GetRegionInfo(class_id);
395     if (region->mapped_user >= next_beg)
396       return reinterpret_cast<void*>(reg_beg + beg);
397     return nullptr;
398   }
399 
400   uptr GetActuallyAllocatedSize(void *p) {
401     CHECK(PointerIsMine(p));
402     return SizeClassMap::Size(GetSizeClass(p));
403   }
404 
405   uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
406 
407   void *GetMetaData(const void *p) {
408     uptr class_id = GetSizeClass(p);
409     uptr size = SizeClassMap::Size(class_id);
410     uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
411     return reinterpret_cast<void *>(SpaceBeg() +
412                                     (kRegionSize * (class_id + 1)) -
413                                     (1 + chunk_idx) * kMetadataSize);
414   }
415 
416   uptr TotalMemoryUsed() {
417     uptr res = 0;
418     for (uptr i = 0; i < kNumClasses; i++)
419       res += GetRegionInfo(i)->allocated_user;
420     return res;
421   }
422 
423   // Test-only.
424   void TestOnlyUnmap() {
425     UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
426   }
427 
428   void PrintStats() {
429     uptr total_mapped = 0;
430     uptr n_allocated = 0;
431     uptr n_freed = 0;
432     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
433       RegionInfo *region = GetRegionInfo(class_id);
434       total_mapped += region->mapped_user;
435       n_allocated += region->n_allocated;
436       n_freed += region->n_freed;
437     }
438     Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
439            "remains %zd\n",
440            total_mapped >> 20, n_allocated, n_allocated - n_freed);
441     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
442       RegionInfo *region = GetRegionInfo(class_id);
443       if (region->mapped_user == 0) continue;
444       Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
445              class_id,
446              SizeClassMap::Size(class_id),
447              region->mapped_user >> 10,
448              region->n_allocated,
449              region->n_allocated - region->n_freed);
450     }
451   }
452 
453   // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
454   // zone introspection API.
455   void ForceLock() {
456     for (uptr i = 0; i < kNumClasses; i++) {
457       GetRegionInfo(i)->mutex.Lock();
458     }
459   }
460 
461   void ForceUnlock() {
462     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
463       GetRegionInfo(i)->mutex.Unlock();
464     }
465   }
466 
467   // Iterate over all existing chunks.
468   // The allocator must be locked when calling this function.
469   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
470     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
471       RegionInfo *region = GetRegionInfo(class_id);
472       uptr chunk_size = SizeClassMap::Size(class_id);
473       uptr region_beg = SpaceBeg() + class_id * kRegionSize;
474       for (uptr chunk = region_beg;
475            chunk < region_beg + region->allocated_user;
476            chunk += chunk_size) {
477         // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
478         callback(chunk, arg);
479       }
480     }
481   }
482 
483   static uptr AdditionalSize() {
484     return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
485                      GetPageSizeCached());
486   }
487 
488   typedef SizeClassMap SizeClassMapT;
489   static const uptr kNumClasses = SizeClassMap::kNumClasses;
490   static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
491 
492  private:
493   static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
494 
495   static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
496   uptr NonConstSpaceBeg;
497   uptr SpaceBeg() const {
498     return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
499   }
500   uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
501   // kRegionSize must be >= 2^32.
502   COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
503   // Populate the free list with at most this number of bytes at once
504   // or with one element if its size is greater.
505   static const uptr kPopulateSize = 1 << 14;
506   // Call mmap for user memory with at least this size.
507   static const uptr kUserMapSize = 1 << 16;
508   // Call mmap for metadata memory with at least this size.
509   static const uptr kMetaMapSize = 1 << 16;
510 
511   struct RegionInfo {
512     BlockingMutex mutex;
513     LFStack<Batch> free_list;
514     uptr allocated_user;  // Bytes allocated for user memory.
515     uptr allocated_meta;  // Bytes allocated for metadata.
516     uptr mapped_user;  // Bytes mapped for user memory.
517     uptr mapped_meta;  // Bytes mapped for metadata.
518     uptr n_allocated, n_freed;  // Just stats.
519   };
520   COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
521 
522   RegionInfo *GetRegionInfo(uptr class_id) {
523     CHECK_LT(class_id, kNumClasses);
524     RegionInfo *regions =
525         reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
526     return &regions[class_id];
527   }
528 
529   static uptr GetChunkIdx(uptr chunk, uptr size) {
530     uptr offset = chunk % kRegionSize;
531     // Here we divide by a non-constant. This is costly.
532     // size always fits into 32-bits. If the offset fits too, use 32-bit div.
533     if (offset >> (SANITIZER_WORDSIZE / 2))
534       return offset / size;
535     return (u32)offset / (u32)size;
536   }
537 
538   NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
539                                    uptr class_id, RegionInfo *region) {
540     BlockingMutexLock l(&region->mutex);
541     Batch *b = region->free_list.Pop();
542     if (b)
543       return b;
544     uptr size = SizeClassMap::Size(class_id);
545     uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
546     uptr beg_idx = region->allocated_user;
547     uptr end_idx = beg_idx + count * size;
548     uptr region_beg = SpaceBeg() + kRegionSize * class_id;
549     if (end_idx + size > region->mapped_user) {
550       // Do the mmap for the user memory.
551       uptr map_size = kUserMapSize;
552       while (end_idx + size > region->mapped_user + map_size)
553         map_size += kUserMapSize;
554       CHECK_GE(region->mapped_user + map_size, end_idx);
555       MapWithCallback(region_beg + region->mapped_user, map_size);
556       stat->Add(AllocatorStatMapped, map_size);
557       region->mapped_user += map_size;
558     }
559     uptr total_count = (region->mapped_user - beg_idx - size)
560         / size / count * count;
561     region->allocated_meta += total_count * kMetadataSize;
562     if (region->allocated_meta > region->mapped_meta) {
563       uptr map_size = kMetaMapSize;
564       while (region->allocated_meta > region->mapped_meta + map_size)
565         map_size += kMetaMapSize;
566       // Do the mmap for the metadata.
567       CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
568       MapWithCallback(region_beg + kRegionSize -
569                       region->mapped_meta - map_size, map_size);
570       region->mapped_meta += map_size;
571     }
572     CHECK_LE(region->allocated_meta, region->mapped_meta);
573     if (region->mapped_user + region->mapped_meta > kRegionSize) {
574       Printf("%s: Out of memory. Dying. ", SanitizerToolName);
575       Printf("The process has exhausted %zuMB for size class %zu.\n",
576           kRegionSize / 1024 / 1024, size);
577       Die();
578     }
579     for (;;) {
580       if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
581         b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
582       else
583         b = (Batch*)(region_beg + beg_idx);
584       b->count = count;
585       for (uptr i = 0; i < count; i++)
586         b->batch[i] = (void*)(region_beg + beg_idx + i * size);
587       region->allocated_user += count * size;
588       CHECK_LE(region->allocated_user, region->mapped_user);
589       beg_idx += count * size;
590       if (beg_idx + count * size + size > region->mapped_user)
591         break;
592       CHECK_GT(b->count, 0);
593       region->free_list.Push(b);
594     }
595     return b;
596   }
597 };
598 
599 // Maps integers in range [0, kSize) to u8 values.
600 template<u64 kSize>
601 class FlatByteMap {
602  public:
603   void TestOnlyInit() {
604     internal_memset(map_, 0, sizeof(map_));
605   }
606 
607   void set(uptr idx, u8 val) {
608     CHECK_LT(idx, kSize);
609     CHECK_EQ(0U, map_[idx]);
610     map_[idx] = val;
611   }
612   u8 operator[] (uptr idx) {
613     CHECK_LT(idx, kSize);
614     // FIXME: CHECK may be too expensive here.
615     return map_[idx];
616   }
617  private:
618   u8 map_[kSize];
619 };
620 
621 // TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
622 // It is implemented as a two-dimensional array: array of kSize1 pointers
623 // to kSize2-byte arrays. The secondary arrays are mmaped on demand.
624 // Each value is initially zero and can be set to something else only once.
625 // Setting and getting values from multiple threads is safe w/o extra locking.
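//
// A minimal usage sketch (hypothetical sizes, for illustration only):
//
//   TwoLevelByteMap<1 << 12, 1 << 14> m;  // covers 2^26 indices
//   m.TestOnlyInit();
//   m.set(12345, 42);   // mmaps the second-level array on first use
//   u8 v = m[12345];    // v == 42; entries that were never set read as 0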
626 template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
627 class TwoLevelByteMap {
628  public:
629   void TestOnlyInit() {
630     internal_memset(map1_, 0, sizeof(map1_));
631     mu_.Init();
632   }
633 
634   void TestOnlyUnmap() {
635     for (uptr i = 0; i < kSize1; i++) {
636       u8 *p = Get(i);
637       if (!p) continue;
638       MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
639       UnmapOrDie(p, kSize2);
640     }
641   }
642 
643   uptr size() const { return kSize1 * kSize2; }
644   uptr size1() const { return kSize1; }
645   uptr size2() const { return kSize2; }
646 
647   void set(uptr idx, u8 val) {
648     CHECK_LT(idx, kSize1 * kSize2);
649     u8 *map2 = GetOrCreate(idx / kSize2);
650     CHECK_EQ(0U, map2[idx % kSize2]);
651     map2[idx % kSize2] = val;
652   }
653 
654   u8 operator[] (uptr idx) const {
655     CHECK_LT(idx, kSize1 * kSize2);
656     u8 *map2 = Get(idx / kSize2);
657     if (!map2) return 0;
658     return map2[idx % kSize2];
659   }
660 
661  private:
662   u8 *Get(uptr idx) const {
663     CHECK_LT(idx, kSize1);
664     return reinterpret_cast<u8 *>(
665         atomic_load(&map1_[idx], memory_order_acquire));
666   }
667 
668   u8 *GetOrCreate(uptr idx) {
669     u8 *res = Get(idx);
670     if (!res) {
671       SpinMutexLock l(&mu_);
672       if (!(res = Get(idx))) {
673         res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
674         MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
675         atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
676                      memory_order_release);
677       }
678     }
679     return res;
680   }
681 
682   atomic_uintptr_t map1_[kSize1];
683   StaticSpinMutex mu_;
684 };
685 
686 // SizeClassAllocator32 -- allocator for 32-bit address space.
687 // This allocator can theoretically be used on 64-bit arch, but there it is less
688 // efficient than SizeClassAllocator64.
689 //
690 // [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
691 // be returned by MmapOrDie().
692 //
693 // Region:
694 //   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
695 // Since the regions are aligned by kRegionSize, there are exactly
696 // kNumPossibleRegions possible regions in the address space and so we keep
697 // a ByteMap possible_regions to store the size classes of each Region.
698 // 0 size class means the region is not used by the allocator.
699 //
700 // One Region is used to allocate chunks of a single size class.
701 // A Region looks like this:
702 // UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
703 //
704 // In order to avoid false sharing, the objects of this class should be
705 // cache-line aligned.
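//
// A hypothetical instantiation (illustrative only; each tool picks its own
// parameters, and the byte map must cover kSpaceSize >> kRegionSizeLog regions):
//
//   static const uptr kRegionSizeLog = 20;  // 1 MB regions
//   typedef SizeClassAllocator32<0, 1ULL << 32, 16, CompactSizeClassMap,
//                                kRegionSizeLog,
//                                FlatByteMap<((1ULL << 32) >> kRegionSizeLog)> >
//       Primary32;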
706 template <const uptr kSpaceBeg, const u64 kSpaceSize,
707           const uptr kMetadataSize, class SizeClassMap,
708           const uptr kRegionSizeLog,
709           class ByteMap,
710           class MapUnmapCallback = NoOpMapUnmapCallback>
711 class SizeClassAllocator32 {
712  public:
713   typedef typename SizeClassMap::TransferBatch Batch;
714   typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
715       SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
716   typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
717 
718   void Init() {
719     possible_regions.TestOnlyInit();
720     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
721   }
722 
723   void *MapWithCallback(uptr size) {
724     size = RoundUpTo(size, GetPageSizeCached());
725     void *res = MmapOrDie(size, "SizeClassAllocator32");
726     MapUnmapCallback().OnMap((uptr)res, size);
727     return res;
728   }
729 
730   void UnmapWithCallback(uptr beg, uptr size) {
731     MapUnmapCallback().OnUnmap(beg, size);
732     UnmapOrDie(reinterpret_cast<void *>(beg), size);
733   }
734 
735   static bool CanAllocate(uptr size, uptr alignment) {
736     return size <= SizeClassMap::kMaxSize &&
737       alignment <= SizeClassMap::kMaxSize;
738   }
739 
740   void *GetMetaData(const void *p) {
741     CHECK(PointerIsMine(p));
742     uptr mem = reinterpret_cast<uptr>(p);
743     uptr beg = ComputeRegionBeg(mem);
744     uptr size = SizeClassMap::Size(GetSizeClass(p));
745     u32 offset = mem - beg;
746     uptr n = offset / (u32)size;  // 32-bit division
747     uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
748     return reinterpret_cast<void*>(meta);
749   }
750 
751   NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
752                                 uptr class_id) {
753     CHECK_LT(class_id, kNumClasses);
754     SizeClassInfo *sci = GetSizeClassInfo(class_id);
755     SpinMutexLock l(&sci->mutex);
756     if (sci->free_list.empty())
757       PopulateFreeList(stat, c, sci, class_id);
758     CHECK(!sci->free_list.empty());
759     Batch *b = sci->free_list.front();
760     sci->free_list.pop_front();
761     return b;
762   }
763 
764   NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
765     CHECK_LT(class_id, kNumClasses);
766     SizeClassInfo *sci = GetSizeClassInfo(class_id);
767     SpinMutexLock l(&sci->mutex);
768     CHECK_GT(b->count, 0);
769     sci->free_list.push_front(b);
770   }
771 
772   bool PointerIsMine(const void *p) {
773     uptr mem = reinterpret_cast<uptr>(p);
774     if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
775       return false;
776     return GetSizeClass(p) != 0;
777   }
778 
779   uptr GetSizeClass(const void *p) {
780     return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
781   }
782 
783   void *GetBlockBegin(const void *p) {
784     CHECK(PointerIsMine(p));
785     uptr mem = reinterpret_cast<uptr>(p);
786     uptr beg = ComputeRegionBeg(mem);
787     uptr size = SizeClassMap::Size(GetSizeClass(p));
788     u32 offset = mem - beg;
789     u32 n = offset / (u32)size;  // 32-bit division
790     uptr res = beg + (n * (u32)size);
791     return reinterpret_cast<void*>(res);
792   }
793 
794   uptr GetActuallyAllocatedSize(void *p) {
795     CHECK(PointerIsMine(p));
796     return SizeClassMap::Size(GetSizeClass(p));
797   }
798 
799   uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
800 
801   uptr TotalMemoryUsed() {
802     // No need to lock here.
803     uptr res = 0;
804     for (uptr i = 0; i < kNumPossibleRegions; i++)
805       if (possible_regions[i])
806         res += kRegionSize;
807     return res;
808   }
809 
810   void TestOnlyUnmap() {
811     for (uptr i = 0; i < kNumPossibleRegions; i++)
812       if (possible_regions[i])
813         UnmapWithCallback((i * kRegionSize), kRegionSize);
814   }
815 
816   // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
817   // zone introspection API.
818   void ForceLock() {
819     for (uptr i = 0; i < kNumClasses; i++) {
820       GetSizeClassInfo(i)->mutex.Lock();
821     }
822   }
823 
824   void ForceUnlock() {
825     for (int i = kNumClasses - 1; i >= 0; i--) {
826       GetSizeClassInfo(i)->mutex.Unlock();
827     }
828   }
829 
830   // Iterate over all existing chunks.
831   // The allocator must be locked when calling this function.
832   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
833     for (uptr region = 0; region < kNumPossibleRegions; region++)
834       if (possible_regions[region]) {
835         uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
836         uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
837         uptr region_beg = region * kRegionSize;
838         for (uptr chunk = region_beg;
839              chunk < region_beg + max_chunks_in_region * chunk_size;
840              chunk += chunk_size) {
841           // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
842           callback(chunk, arg);
843         }
844       }
845   }
846 
847   void PrintStats() {
848   }
849 
850   static uptr AdditionalSize() {
851     return 0;
852   }
853 
854   typedef SizeClassMap SizeClassMapT;
855   static const uptr kNumClasses = SizeClassMap::kNumClasses;
856 
857  private:
858   static const uptr kRegionSize = 1 << kRegionSizeLog;
859   static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
860 
861   struct SizeClassInfo {
862     SpinMutex mutex;
863     IntrusiveList<Batch> free_list;
864     char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
865   };
866   COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
867 
868   uptr ComputeRegionId(uptr mem) {
869     uptr res = mem >> kRegionSizeLog;
870     CHECK_LT(res, kNumPossibleRegions);
871     return res;
872   }
873 
874   uptr ComputeRegionBeg(uptr mem) {
875     return mem & ~(kRegionSize - 1);
876   }
877 
878   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
879     CHECK_LT(class_id, kNumClasses);
880     uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
881                                       "SizeClassAllocator32"));
882     MapUnmapCallback().OnMap(res, kRegionSize);
883     stat->Add(AllocatorStatMapped, kRegionSize);
884     CHECK_EQ(0U, (res & (kRegionSize - 1)));
885     possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
886     return res;
887   }
888 
889   SizeClassInfo *GetSizeClassInfo(uptr class_id) {
890     CHECK_LT(class_id, kNumClasses);
891     return &size_class_info_array[class_id];
892   }
893 
894   void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
895                         SizeClassInfo *sci, uptr class_id) {
896     uptr size = SizeClassMap::Size(class_id);
897     uptr reg = AllocateRegion(stat, class_id);
898     uptr n_chunks = kRegionSize / (size + kMetadataSize);
899     uptr max_count = SizeClassMap::MaxCached(class_id);
900     Batch *b = nullptr;
901     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
902       if (!b) {
903         if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
904           b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
905         else
906           b = (Batch*)i;
907         b->count = 0;
908       }
909       b->batch[b->count++] = (void*)i;
910       if (b->count == max_count) {
911         CHECK_GT(b->count, 0);
912         sci->free_list.push_back(b);
913         b = nullptr;
914       }
915     }
916     if (b) {
917       CHECK_GT(b->count, 0);
918       sci->free_list.push_back(b);
919     }
920   }
921 
922   ByteMap possible_regions;
923   SizeClassInfo size_class_info_array[kNumClasses];
924 };
925 
926 // Objects of this type should be used as local caches for SizeClassAllocator64
927 // or SizeClassAllocator32. Since the typical use of this class is to have one
928 // object per thread in TLS, it has to be POD.
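//
// A minimal usage sketch (assuming a Primary allocator typedef and a `primary`
// object as in the illustrations above; all names are hypothetical):
//
//   typedef SizeClassAllocatorLocalCache<Primary> Cache;
//   static THREADLOCAL Cache cache;  // POD, so a plain TLS object works
//   cache.Init(/*AllocatorGlobalStats*/ nullptr);
//   uptr class_id = Primary::SizeClassMapT::ClassID(100);
//   void *p = cache.Allocate(&primary, class_id);
//   cache.Deallocate(&primary, class_id, p);
//   cache.Destroy(&primary, nullptr);  // drains the remaining cached chunks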
929 template<class SizeClassAllocator>
930 struct SizeClassAllocatorLocalCache {
931   typedef SizeClassAllocator Allocator;
932   static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
933 
934   void Init(AllocatorGlobalStats *s) {
935     stats_.Init();
936     if (s)
937       s->Register(&stats_);
938   }
939 
940   void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
941     Drain(allocator);
942     if (s)
943       s->Unregister(&stats_);
944   }
945 
946   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
947     CHECK_NE(class_id, 0UL);
948     CHECK_LT(class_id, kNumClasses);
949     stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
950     PerClass *c = &per_class_[class_id];
951     if (UNLIKELY(c->count == 0))
952       Refill(allocator, class_id);
953     void *res = c->batch[--c->count];
954     PREFETCH(c->batch[c->count - 1]);
955     return res;
956   }
957 
958   void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
959     CHECK_NE(class_id, 0UL);
960     CHECK_LT(class_id, kNumClasses);
961     // If the first allocator call on a new thread is a deallocation, then
962     // max_count will be zero, leading to check failure.
963     InitCache();
964     stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
965     PerClass *c = &per_class_[class_id];
966     CHECK_NE(c->max_count, 0UL);
967     if (UNLIKELY(c->count == c->max_count))
968       Drain(allocator, class_id);
969     c->batch[c->count++] = p;
970   }
971 
972   void Drain(SizeClassAllocator *allocator) {
973     for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
974       PerClass *c = &per_class_[class_id];
975       while (c->count > 0)
976         Drain(allocator, class_id);
977     }
978   }
979 
980   // private:
981   typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
982   typedef typename SizeClassMap::TransferBatch Batch;
983   struct PerClass {
984     uptr count;
985     uptr max_count;
986     void *batch[2 * SizeClassMap::kMaxNumCached];
987   };
988   PerClass per_class_[kNumClasses];
989   AllocatorStats stats_;
990 
991   void InitCache() {
992     if (per_class_[1].max_count)
993       return;
994     for (uptr i = 0; i < kNumClasses; i++) {
995       PerClass *c = &per_class_[i];
996       c->max_count = 2 * SizeClassMap::MaxCached(i);
997     }
998   }
999 
1000   NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
1001     InitCache();
1002     PerClass *c = &per_class_[class_id];
1003     Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
1004     CHECK_GT(b->count, 0);
1005     for (uptr i = 0; i < b->count; i++)
1006       c->batch[i] = b->batch[i];
1007     c->count = b->count;
1008     if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
1009       Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
1010   }
1011 
1012   NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
1013     InitCache();
1014     PerClass *c = &per_class_[class_id];
1015     Batch *b;
1016     if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
1017       b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
1018     else
1019       b = (Batch*)c->batch[0];
1020     uptr cnt = Min(c->max_count / 2, c->count);
1021     for (uptr i = 0; i < cnt; i++) {
1022       b->batch[i] = c->batch[i];
1023       c->batch[i] = c->batch[i + c->max_count / 2];
1024     }
1025     b->count = cnt;
1026     c->count -= cnt;
1027     CHECK_GT(b->count, 0);
1028     allocator->DeallocateBatch(&stats_, class_id, b);
1029   }
1030 };
1031 
1032 // This class can (de)allocate only large chunks of memory using mmap/unmap.
1033 // The main purpose of this allocator is to cover large and rare allocation
1034 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
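//
// A minimal usage sketch (illustrative):
//
//   LargeMmapAllocator<> secondary;
//   secondary.Init(/*may_return_null*/ false);
//   AllocatorStats stats;
//   stats.Init();
//   void *p = secondary.Allocate(&stats, 1 << 20, /*alignment*/ 8);
//   // The page-sized Header sits immediately below p; GetMetaData(p) points
//   // just past that Header, still within the same page.
//   secondary.Deallocate(&stats, p);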
1035 template <class MapUnmapCallback = NoOpMapUnmapCallback>
1036 class LargeMmapAllocator {
1037  public:
1038   void InitLinkerInitialized(bool may_return_null) {
1039     page_size_ = GetPageSizeCached();
1040     atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
1041   }
1042 
1043   void Init(bool may_return_null) {
1044     internal_memset(this, 0, sizeof(*this));
1045     InitLinkerInitialized(may_return_null);
1046   }
1047 
1048   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
1049     CHECK(IsPowerOfTwo(alignment));
1050     uptr map_size = RoundUpMapSize(size);
1051     if (alignment > page_size_)
1052       map_size += alignment;
1053     // Overflow.
1054     if (map_size < size)
1055       return ReturnNullOrDie();
1056     uptr map_beg = reinterpret_cast<uptr>(
1057         MmapOrDie(map_size, "LargeMmapAllocator"));
1058     CHECK(IsAligned(map_beg, page_size_));
1059     MapUnmapCallback().OnMap(map_beg, map_size);
1060     uptr map_end = map_beg + map_size;
1061     uptr res = map_beg + page_size_;
1062     if (res & (alignment - 1))  // Align.
1063       res += alignment - (res & (alignment - 1));
1064     CHECK(IsAligned(res, alignment));
1065     CHECK(IsAligned(res, page_size_));
1066     CHECK_GE(res + size, map_beg);
1067     CHECK_LE(res + size, map_end);
1068     Header *h = GetHeader(res);
1069     h->size = size;
1070     h->map_beg = map_beg;
1071     h->map_size = map_size;
1072     uptr size_log = MostSignificantSetBitIndex(map_size);
1073     CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
1074     {
1075       SpinMutexLock l(&mutex_);
1076       uptr idx = n_chunks_++;
1077       chunks_sorted_ = false;
1078       CHECK_LT(idx, kMaxNumChunks);
1079       h->chunk_idx = idx;
1080       chunks_[idx] = h;
1081       stats.n_allocs++;
1082       stats.currently_allocated += map_size;
1083       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
1084       stats.by_size_log[size_log]++;
1085       stat->Add(AllocatorStatAllocated, map_size);
1086       stat->Add(AllocatorStatMapped, map_size);
1087     }
1088     return reinterpret_cast<void*>(res);
1089   }
1090 
1091   void *ReturnNullOrDie() {
1092     if (atomic_load(&may_return_null_, memory_order_acquire))
1093       return nullptr;
1094     ReportAllocatorCannotReturnNull();
1095   }
1096 
1097   void SetMayReturnNull(bool may_return_null) {
1098     atomic_store(&may_return_null_, may_return_null, memory_order_release);
1099   }
1100 
1101   void Deallocate(AllocatorStats *stat, void *p) {
1102     Header *h = GetHeader(p);
1103     {
1104       SpinMutexLock l(&mutex_);
1105       uptr idx = h->chunk_idx;
1106       CHECK_EQ(chunks_[idx], h);
1107       CHECK_LT(idx, n_chunks_);
1108       chunks_[idx] = chunks_[n_chunks_ - 1];
1109       chunks_[idx]->chunk_idx = idx;
1110       n_chunks_--;
1111       chunks_sorted_ = false;
1112       stats.n_frees++;
1113       stats.currently_allocated -= h->map_size;
1114       stat->Sub(AllocatorStatAllocated, h->map_size);
1115       stat->Sub(AllocatorStatMapped, h->map_size);
1116     }
1117     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
1118     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
1119   }
1120 
1121   uptr TotalMemoryUsed() {
1122     SpinMutexLock l(&mutex_);
1123     uptr res = 0;
1124     for (uptr i = 0; i < n_chunks_; i++) {
1125       Header *h = chunks_[i];
1126       CHECK_EQ(h->chunk_idx, i);
1127       res += RoundUpMapSize(h->size);
1128     }
1129     return res;
1130   }
1131 
1132   bool PointerIsMine(const void *p) {
1133     return GetBlockBegin(p) != nullptr;
1134   }
1135 
1136   uptr GetActuallyAllocatedSize(void *p) {
1137     return RoundUpTo(GetHeader(p)->size, page_size_);
1138   }
1139 
1140   // At least page_size_/2 metadata bytes are available.
1141   void *GetMetaData(const void *p) {
1142     // Too slow: CHECK_EQ(p, GetBlockBegin(p));
1143     if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
1144       Printf("%s: bad pointer %p\n", SanitizerToolName, p);
1145       CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
1146     }
1147     return GetHeader(p) + 1;
1148   }
1149 
1150   void *GetBlockBegin(const void *ptr) {
1151     uptr p = reinterpret_cast<uptr>(ptr);
1152     SpinMutexLock l(&mutex_);
1153     uptr nearest_chunk = 0;
1154     // Cache-friendly linear search.
1155     for (uptr i = 0; i < n_chunks_; i++) {
1156       uptr ch = reinterpret_cast<uptr>(chunks_[i]);
1157       if (p < ch) continue;  // p is to the left of this chunk, skip it.
1158       if (p - ch < p - nearest_chunk)
1159         nearest_chunk = ch;
1160     }
1161     if (!nearest_chunk)
1162       return nullptr;
1163     Header *h = reinterpret_cast<Header *>(nearest_chunk);
1164     CHECK_GE(nearest_chunk, h->map_beg);
1165     CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
1166     CHECK_LE(nearest_chunk, p);
1167     if (h->map_beg + h->map_size <= p)
1168       return nullptr;
1169     return GetUser(h);
1170   }
1171 
1172   // This function does the same as GetBlockBegin, but is much faster.
1173   // Must be called with the allocator locked.
1174   void *GetBlockBeginFastLocked(void *ptr) {
1175     mutex_.CheckLocked();
1176     uptr p = reinterpret_cast<uptr>(ptr);
1177     uptr n = n_chunks_;
1178     if (!n) return nullptr;
1179     if (!chunks_sorted_) {
1180       // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
1181       SortArray(reinterpret_cast<uptr*>(chunks_), n);
1182       for (uptr i = 0; i < n; i++)
1183         chunks_[i]->chunk_idx = i;
1184       chunks_sorted_ = true;
1185       min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
1186       max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
1187           chunks_[n - 1]->map_size;
1188     }
1189     if (p < min_mmap_ || p >= max_mmap_)
1190       return nullptr;
1191     uptr beg = 0, end = n - 1;
1192     // This loop is a log(n) lower_bound. It does not check for the exact match
1193     // to avoid expensive cache-thrashing loads.
1194     while (end - beg >= 2) {
1195       uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
1196       if (p < reinterpret_cast<uptr>(chunks_[mid]))
1197         end = mid - 1;  // We are not interested in chunks_[mid].
1198       else
1199         beg = mid;  // chunks_[mid] may still be what we want.
1200     }
1201 
1202     if (beg < end) {
1203       CHECK_EQ(beg + 1, end);
1204       // There are 2 chunks left, choose one.
1205       if (p >= reinterpret_cast<uptr>(chunks_[end]))
1206         beg = end;
1207     }
1208 
1209     Header *h = chunks_[beg];
1210     if (h->map_beg + h->map_size <= p || p < h->map_beg)
1211       return nullptr;
1212     return GetUser(h);
1213   }
1214 
1215   void PrintStats() {
1216     Printf("Stats: LargeMmapAllocator: allocated %zd times, "
1217            "remains %zd (%zd K) max %zd M; by size logs: ",
1218            stats.n_allocs, stats.n_allocs - stats.n_frees,
1219            stats.currently_allocated >> 10, stats.max_allocated >> 20);
1220     for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
1221       uptr c = stats.by_size_log[i];
1222       if (!c) continue;
1223       Printf("%zd:%zd; ", i, c);
1224     }
1225     Printf("\n");
1226   }
1227 
1228   // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
1229   // zone introspection API.
1230   void ForceLock() {
1231     mutex_.Lock();
1232   }
1233 
1234   void ForceUnlock() {
1235     mutex_.Unlock();
1236   }
1237 
1238   // Iterate over all existing chunks.
1239   // The allocator must be locked when calling this function.
1240   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1241     for (uptr i = 0; i < n_chunks_; i++)
1242       callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
1243   }
1244 
1245  private:
1246   static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
1247   struct Header {
1248     uptr map_beg;
1249     uptr map_size;
1250     uptr size;
1251     uptr chunk_idx;
1252   };
1253 
1254   Header *GetHeader(uptr p) {
1255     CHECK(IsAligned(p, page_size_));
1256     return reinterpret_cast<Header*>(p - page_size_);
1257   }
1258   Header *GetHeader(const void *p) {
1259     return GetHeader(reinterpret_cast<uptr>(p));
1260   }
1261 
1262   void *GetUser(Header *h) {
1263     CHECK(IsAligned((uptr)h, page_size_));
1264     return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
1265   }
1266 
1267   uptr RoundUpMapSize(uptr size) {
1268     return RoundUpTo(size, page_size_) + page_size_;
1269   }
1270 
1271   uptr page_size_;
1272   Header *chunks_[kMaxNumChunks];
1273   uptr n_chunks_;
1274   uptr min_mmap_, max_mmap_;
1275   bool chunks_sorted_;
1276   struct Stats {
1277     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
1278   } stats;
1279   atomic_uint8_t may_return_null_;
1280   SpinMutex mutex_;
1281 };
1282 
1283 // This class implements a complete memory allocator by using two
1284 // internal allocators:
1285 // PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
1286 //  When allocating 2^x bytes it should return a 2^x-aligned chunk.
1287 // PrimaryAllocator is used via a local AllocatorCache.
1288 // SecondaryAllocator can allocate anything, but is not efficient.
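//
// A hypothetical composition, loosely modeled on how the sanitizer tools wire
// these pieces together (all names and parameters are illustrative only):
//
//   typedef SizeClassAllocator64<~(uptr)0 /*dynamic SpaceBeg*/, 1ULL << 40, 16,
//                                DefaultSizeClassMap> Primary;
//   typedef SizeClassAllocatorLocalCache<Primary> Cache;
//   typedef LargeMmapAllocator<> Secondary;
//   typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;
//
//   static Allocator allocator;
//   static THREADLOCAL Cache cache;
//   allocator.Init(/*may_return_null*/ false);
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, 100, /*alignment*/ 8);
//   allocator.Deallocate(&cache, p);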
1289 template <class PrimaryAllocator, class AllocatorCache,
1290           class SecondaryAllocator>  // NOLINT
1291 class CombinedAllocator {
1292  public:
1293   void InitCommon(bool may_return_null) {
1294     primary_.Init();
1295     atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
1296   }
1297 
1298   void InitLinkerInitialized(bool may_return_null) {
1299     secondary_.InitLinkerInitialized(may_return_null);
1300     stats_.InitLinkerInitialized();
1301     InitCommon(may_return_null);
1302   }
1303 
1304   void Init(bool may_return_null) {
1305     secondary_.Init(may_return_null);
1306     stats_.Init();
1307     InitCommon(may_return_null);
1308   }
1309 
1310   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
1311                  bool cleared = false, bool check_rss_limit = false) {
1312     // Returning 0 on malloc(0) may break a lot of code.
1313     if (size == 0)
1314       size = 1;
1315     if (size + alignment < size)
1316       return ReturnNullOrDie();
1317     if (check_rss_limit && RssLimitIsExceeded())
1318       return ReturnNullOrDie();
1319     if (alignment > 8)
1320       size = RoundUpTo(size, alignment);
1321     void *res;
1322     bool from_primary = primary_.CanAllocate(size, alignment);
1323     if (from_primary)
1324       res = cache->Allocate(&primary_, primary_.ClassID(size));
1325     else
1326       res = secondary_.Allocate(&stats_, size, alignment);
1327     if (alignment > 8)
1328       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
1329     if (cleared && res && from_primary)
1330       internal_bzero_aligned16(res, RoundUpTo(size, 16));
1331     return res;
1332   }
1333 
1334   bool MayReturnNull() const {
1335     return atomic_load(&may_return_null_, memory_order_acquire);
1336   }
1337 
1338   void *ReturnNullOrDie() {
1339     if (MayReturnNull())
1340       return nullptr;
1341     ReportAllocatorCannotReturnNull();
1342   }
1343 
1344   void SetMayReturnNull(bool may_return_null) {
1345     secondary_.SetMayReturnNull(may_return_null);
1346     atomic_store(&may_return_null_, may_return_null, memory_order_release);
1347   }
1348 
1349   bool RssLimitIsExceeded() {
1350     return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
1351   }
1352 
1353   void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
1354     atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
1355                  memory_order_release);
1356   }
1357 
1358   void Deallocate(AllocatorCache *cache, void *p) {
1359     if (!p) return;
1360     if (primary_.PointerIsMine(p))
1361       cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
1362     else
1363       secondary_.Deallocate(&stats_, p);
1364   }
1365 
1366   void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
1367                    uptr alignment) {
1368     if (!p)
1369       return Allocate(cache, new_size, alignment);
1370     if (!new_size) {
1371       Deallocate(cache, p);
1372       return nullptr;
1373     }
1374     CHECK(PointerIsMine(p));
1375     uptr old_size = GetActuallyAllocatedSize(p);
1376     uptr memcpy_size = Min(new_size, old_size);
1377     void *new_p = Allocate(cache, new_size, alignment);
1378     if (new_p)
1379       internal_memcpy(new_p, p, memcpy_size);
1380     Deallocate(cache, p);
1381     return new_p;
1382   }
1383 
1384   bool PointerIsMine(void *p) {
1385     if (primary_.PointerIsMine(p))
1386       return true;
1387     return secondary_.PointerIsMine(p);
1388   }
1389 
1390   bool FromPrimary(void *p) {
1391     return primary_.PointerIsMine(p);
1392   }
1393 
1394   void *GetMetaData(const void *p) {
1395     if (primary_.PointerIsMine(p))
1396       return primary_.GetMetaData(p);
1397     return secondary_.GetMetaData(p);
1398   }
1399 
1400   void *GetBlockBegin(const void *p) {
1401     if (primary_.PointerIsMine(p))
1402       return primary_.GetBlockBegin(p);
1403     return secondary_.GetBlockBegin(p);
1404   }
1405 
1406   // This function does the same as GetBlockBegin, but is much faster.
1407   // Must be called with the allocator locked.
1408   void *GetBlockBeginFastLocked(void *p) {
1409     if (primary_.PointerIsMine(p))
1410       return primary_.GetBlockBegin(p);
1411     return secondary_.GetBlockBeginFastLocked(p);
1412   }
1413 
1414   uptr GetActuallyAllocatedSize(void *p) {
1415     if (primary_.PointerIsMine(p))
1416       return primary_.GetActuallyAllocatedSize(p);
1417     return secondary_.GetActuallyAllocatedSize(p);
1418   }
1419 
1420   uptr TotalMemoryUsed() {
1421     return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
1422   }
1423 
1424   void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
1425 
1426   void InitCache(AllocatorCache *cache) {
1427     cache->Init(&stats_);
1428   }
1429 
1430   void DestroyCache(AllocatorCache *cache) {
1431     cache->Destroy(&primary_, &stats_);
1432   }
1433 
1434   void SwallowCache(AllocatorCache *cache) {
1435     cache->Drain(&primary_);
1436   }
1437 
1438   void GetStats(AllocatorStatCounters s) const {
1439     stats_.Get(s);
1440   }
1441 
1442   void PrintStats() {
1443     primary_.PrintStats();
1444     secondary_.PrintStats();
1445   }
1446 
1447   // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
1448   // zone introspection API.
1449   void ForceLock() {
1450     primary_.ForceLock();
1451     secondary_.ForceLock();
1452   }
1453 
1454   void ForceUnlock() {
1455     secondary_.ForceUnlock();
1456     primary_.ForceUnlock();
1457   }
1458 
1459   // Iterate over all existing chunks.
1460   // The allocator must be locked when calling this function.
1461   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1462     primary_.ForEachChunk(callback, arg);
1463     secondary_.ForEachChunk(callback, arg);
1464   }
1465 
1466  private:
1467   PrimaryAllocator primary_;
1468   SecondaryAllocator secondary_;
1469   AllocatorGlobalStats stats_;
1470   atomic_uint8_t may_return_null_;
1471   atomic_uint8_t rss_limit_is_exceeded_;
1472 };
1473 
1474 // Returns true if calloc(size, n) should return 0 due to overflow in size*n.
1475 bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
1476 
1477 } // namespace __sanitizer
1478 
1479 #endif // SANITIZER_ALLOCATOR_H
1480