//===--- Allocator.h - Simple memory allocation abstraction -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
/// of these conform to an LLVM "Allocator" concept which consists of an
/// Allocate method accepting a size and alignment, and a Deallocate accepting
/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
/// Allocate and Deallocate for setting size and alignment based on the final
/// type. These overloads are typically provided by a base class template \c
/// AllocatorBase.
///
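/// For illustration, typical use of the BumpPtrAllocator defined below might
/// look like the following sketch:
///
/// \code
///   llvm::BumpPtrAllocator Alloc;
///   int *Ints = Alloc.Allocate<int>(16); // Space for 16 ints, uninitialized.
///   // All memory is reclaimed at once by Reset() or the destructor.
/// \endcode
///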
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// \brief CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
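///
/// A conforming derived class, shown as a sketch (the name MyAllocator is
/// hypothetical), needs to define only the two core methods; the typed
/// overloads below are inherited:
///
/// \code
///   class MyAllocator : public AllocatorBase<MyAllocator> {
///   public:
///     void *Allocate(size_t Size, size_t Alignment); // Core overload.
///     void Deallocate(const void *Ptr, size_t Size); // Core overload.
///   };
/// \endcode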
template <typename DerivedT> class AllocatorBase {
public:
  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(const void *, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// \brief Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }

  /// \brief Deallocate space for a sequence of objects without destroying them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};

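/// \brief An allocator that forwards to malloc/free, conforming to the LLVM
/// "Allocator" concept. Allocate relies on malloc's natural alignment and
/// ignores the alignment argument, Deallocate ignores the size argument, and
/// Reset() is a no-op.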
class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                size_t /*Alignment*/) {
    return malloc(Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
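///
/// A brief usage sketch, assuming the default template parameters:
///
/// \code
///   llvm::BumpPtrAllocator Alloc;
///   void *Raw = Alloc.Allocate(128, 8);       // 128 bytes, 8-byte aligned.
///   double *Ds = Alloc.Allocate<double>(10);  // Typed helper from the base.
///   Alloc.Reset();  // Reclaims everything but the first slab.
/// \endcode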
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl()
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
        Allocator(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// \brief Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// \brief Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    // Check if we have enough space.
    if (Adjustment + Size <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + Size;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = Size + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method.  So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + Size <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + Size;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  void Deallocate(const void *Ptr, size_t Size) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// \brief The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr;

  /// \brief The end of the current slab.
  char *End;

  /// \brief The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// \brief Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// \brief How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated;

  /// \brief The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size by
    // 2^30.
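    // For example, with the default SlabSize of 4096, slabs 0-127 are 4096
    // bytes each, slabs 128-255 are 8192 bytes, slabs 256-383 are 16384
    // bytes, and so on.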
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }

  /// \brief Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method.  So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// \brief Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }

  /// \brief Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// \brief The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator is
/// destroyed.
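///
/// A short usage sketch (the element type Foo is hypothetical):
///
/// \code
///   llvm::SpecificBumpPtrAllocator<Foo> Alloc;
///   Foo *F = new (Alloc.Allocate()) Foo();  // Placement-construct one Foo.
///   Alloc.DestroyAll();                     // Runs ~Foo() on each element.
/// \endcode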
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() = default;
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object, deallocate all but the
  /// current slab, and reset the current pointer to the beginning of that
  /// slab, freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, alignof(T));
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// \brief Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

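/// \brief A placement operator new for allocating from a BumpPtrAllocatorImpl,
/// enabling expressions such as `new (Alloc) Foo(...)`.
///
/// The local struct S is a layout trick: offsetof(S, x) yields the strictest
/// alignment among the common scalar types, and the allocation is aligned to
/// the smaller of that and the next power of two above \p Size.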
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}
434 
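/// \brief The matching placement operator delete; it is invoked only if a
/// constructor throws during the placement new above, and intentionally does
/// nothing because a BumpPtrAllocator never frees individual allocations.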
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H