//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// Depending on allocator_may_return_null, either return 0 or crash.
void *AllocatorReturnNull();

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64  (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
//   - Efficient table-free class-to-size and size-to-class functions.
//   - Difference between two consecutive size classes is between 14% and 25%.
//
// This class also gives a hint to a thread-caching allocator about the number
// of chunks that need to be cached per thread:
//  - kMaxNumCached is the maximal number of chunks per size class.
//  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52

template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32  ? 32 :
      kNumClasses <= 64  ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64,  14> CompactSizeClassMap;
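
// Worked example (illustrative, derived from the Print() table above): with
// DefaultSizeClassMap == SizeClassMap<17, 128, 16> the mapping behaves as
//   DefaultSizeClassMap::ClassID(1)  == 1    // rounded up to 16 bytes
//   DefaultSizeClassMap::ClassID(17) == 2    // rounded up to 32 bytes
//   DefaultSizeClassMap::Size(17)    == 320  // class c17 in the table
//   DefaultSizeClassMap::kMaxSize    == 1 << 17
// Validate() can be called from a test to check that Size() and ClassID()
// are consistent inverses of each other.
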
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatAllocated,
  AllocatorStatMapped,
  AllocatorStatCount
};

typedef uptr AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, stored in the per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void Add(AllocatorStat i, uptr v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Sub(AllocatorStat i, uptr v) {
    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, uptr v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  uptr Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uintptr_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    next_ = this;
    prev_ = this;
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
    // All stats must be non-negative.
    for (int i = 0; i < AllocatorStatCount; i++)
      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
  }

 private:
  mutable SpinMutex mu_;
};
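
// Usage sketch (illustrative, names are hypothetical): each thread cache's
// AllocatorStats is registered with a single AllocatorGlobalStats object
// (see CombinedAllocator::InitCache below); an aggregated snapshot of the
// counters can then be read out with Get():
//   AllocatorStatCounters counters;
//   global_stats.Get(counters);
//   uptr in_use = counters[AllocatorStatAllocated];
//   uptr mapped = counters[AllocatorStatMapped];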

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
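
// Illustrative sketch (hypothetical, not part of the original header): a tool
// can observe all mmap/munmap activity of an allocator by passing its own
// callback as the MapUnmapCallback template argument, e.g.:
//   struct VerboseMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const {
//       Printf("mapped %zd bytes at 0x%zx\n", size, p);
//     }
//     void OnUnmap(uptr p, uptr size) const {
//       Printf("unmapped %zd bytes at 0x%zx\n", size, p);
//     }
//   };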

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
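//
// Worked example (illustrative, the numbers are hypothetical): with
// kSpaceSize == 1ULL << 40 and kNumClassesRounded == 64, each Region is
// kRegionSize == 1ULL << 34 bytes. The chunk with index i in the Region of
// size class c then lives at
//   kSpaceBeg + c * kRegionSize + i * Size(c)
// and its metadata lives at
//   kSpaceBeg + (c + 1) * kRegionSize - (i + 1) * kMetadataSize,
// i.e. user chunks grow up from the Region start while metadata grows down
// from the Region end, as in the picture above (see GetMetaData below).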
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (b == 0)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  static bool PointerIsMine(const void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(const void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return 0;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return 0;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
          kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};
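
// Illustrative sketch (hypothetical constants, not part of the original
// header): a tool-specific 64-bit primary allocator is typically defined as
//   static const uptr kAllocatorSpace = 0x600000000000ULL;  // example only
//   static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T, example
//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
//                                16 /*kMetadataSize*/, DefaultSizeClassMap>
//       PrimaryAllocator;
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
// Note that kAllocatorSpace must be a multiple of kAllocatorSize and the
// resulting kRegionSize must be at least 2^32 (both are checked below by
// COMPILER_CHECKs inside the class).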

// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }
 private:
  u8 map_[kSize];
};

// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmapped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }
  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      u8 *p = Get(i);
      if (!p) continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
      UnmapOrDie(p, kSize2);
    }
  }

  uptr size() const { return kSize1 * kSize2; }
  uptr size1() const { return kSize1; }
  uptr size2() const { return kSize2; }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = GetOrCreate(idx / kSize2);
    CHECK_EQ(0U, map2[idx % kSize2]);
    map2[idx % kSize2] = val;
  }

  u8 operator[] (uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = Get(idx / kSize2);
    if (!map2) return 0;
    return map2[idx % kSize2];
  }

 private:
  u8 *Get(uptr idx) const {
    CHECK_LT(idx, kSize1);
    return reinterpret_cast<u8 *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  u8 *GetOrCreate(uptr idx) {
    u8 *res = Get(idx);
    if (!res) {
      SpinMutexLock l(&mu_);
      if (!(res = Get(idx))) {
        res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                     memory_order_release);
      }
    }
    return res;
  }

  atomic_uintptr_t map1_[kSize1];
  StaticSpinMutex mu_;
};
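
// Usage sketch (illustrative, sizes are hypothetical): both byte maps share
// the same interface and are used below as SizeClassAllocator32's
// possible_regions map, recording which size class owns each region:
//   TwoLevelByteMap<1 << 12, 1 << 14> m;  // covers 2^26 region ids
//   m.TestOnlyInit();
//   m.set(42, 7);     // region 42 now belongs to size class 7
//   u8 cls = m[42];   // == 7; unset entries read as 0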

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// To avoid false sharing, the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (b == 0) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = 0;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
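
// Illustrative sketch (hypothetical constants): a typical 32-bit setup covers
// the whole 2^32 address space with 2^20-byte regions, so the region ByteMap
// needs kSpaceSize / kRegionSize == 2^12 entries:
//   static const u64 kSpaceSize = 1ULL << 32;   // whole 32-bit space
//   static const uptr kRegionSizeLog = 20;      // 1M per region
//   typedef FlatByteMap<(kSpaceSize >> kRegionSizeLog)> ByteMap;
//   typedef SizeClassAllocator32<0, kSpaceSize, 16 /*kMetadataSize*/,
//       CompactSizeClassMap, kRegionSizeLog, ByteMap> PrimaryAllocator;
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;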

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    b->count = cnt;
    c->count -= cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
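
// Usage sketch (illustrative, names are hypothetical): each thread owns one
// cache object, typically placed in TLS, and drives it roughly like this:
//   AllocatorCache cache;                         // POD, lives in TLS
//   cache.Init(&global_stats);                    // or Init(0) without stats
//   void *p = cache.Allocate(&allocator, allocator.ClassID(96));
//   cache.Deallocate(&allocator, allocator.GetSizeClass(p), p);
//   cache.Destroy(&allocator, &global_stats);     // drains chunks back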

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    page_size_ = GetPageSizeCached();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    if (map_size < size) return AllocatorReturnNull();  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return 0;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return 0;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return 0;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return 0;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return 0;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  SpinMutex mutex_;
};
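
// Note on the chunk layout (derived from the code above): every chunk
// returned by LargeMmapAllocator::Allocate is page-aligned; the Header lives
// in the page immediately preceding the user pointer
// (GetHeader(p) == p - page_size_), and GetMetaData(p) points right after
// that Header, so roughly page_size_ - sizeof(Header) bytes of metadata fit
// into the header page.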

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
    stats_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return AllocatorReturnNull();
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    bool from_primary = primary_.CanAllocate(size, alignment);
    if (from_primary)
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res && from_primary)
      internal_bzero_aligned16(res, RoundUpTo(size, 16));
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};
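
// Illustrative sketch (hypothetical names, continuing the PrimaryAllocator
// and AllocatorCache typedefs sketched after SizeClassAllocator64 above): a
// complete allocator for a tool is assembled roughly as follows:
//   typedef LargeMmapAllocator<> SecondaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//                             SecondaryAllocator> Allocator;
//   static Allocator allocator;                // one instance per process
//   static THREADLOCAL AllocatorCache cache;   // one cache per thread
//   ...
//   allocator.Init();
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, size, /*alignment*/ 8);
//   allocator.Deallocate(&cache, p);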

// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
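// For example (illustrative): on a 32-bit target,
// CallocShouldReturnNullDueToOverflow(1 << 16, (1 << 16) + 1) is true,
// because the product 2^32 + 2^16 does not fit into a 32-bit uptr.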
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H
