/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrSubRunAllocator_DEFINED
#define GrSubRunAllocator_DEFINED

#include "include/core/SkMath.h"
#include "include/core/SkSpan.h"
#include "src/core/SkArenaAlloc.h"

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

// GrBagOfBytes parcels out bytes with a given size and alignment.
class GrBagOfBytes {
public:
    GrBagOfBytes(char* block, size_t blockSize, size_t firstHeapAllocation);
    explicit GrBagOfBytes(size_t firstHeapAllocation = 0);
    ~GrBagOfBytes();

    // Given a requestedSize, round up to the smallest size that accounts for all the per-block
    // overhead and alignment. It crashes if requestedSize is negative or too big.
    static constexpr int PlatformMinimumSizeWithOverhead(int requestedSize, int assumedAlignment) {
        return MinimumSizeWithOverhead(
                requestedSize, assumedAlignment, sizeof(Block), kMaxAlignment);
    }

    static constexpr int MinimumSizeWithOverhead(
            int requestedSize, int assumedAlignment, int blockSize, int maxAlignment) {
        SkASSERT_RELEASE(0 <= requestedSize && requestedSize < kMaxByteSize);
        SkASSERT_RELEASE(SkIsPow2(assumedAlignment) && SkIsPow2(maxAlignment));

        const int minAlignment = std::min(maxAlignment, assumedAlignment);
        // There are two cases, one easy and one subtle. The easy case is when minAlignment ==
        // maxAlignment. When that happens, the term maxAlignment - minAlignment is zero, and the
        // block lands at the proper alignment because AlignUp(requestedSize, minAlignment) is
        // already a multiple of maxAlignment.
        // The subtle case is when minAlignment < maxAlignment. Then
        // AlignUp(requestedSize, minAlignment) + blockSize does not guarantee that the block can
        // be placed at a maxAlignment address. The block can land at any of
        // maxAlignment/minAlignment different offsets, so we need to add memory to allow the
        // block to be placed on a maxAlignment address.
        // For example, if assumedAlignment = 4 and maxAlignment = 16, then the block can be placed
        // at the following offsets from the end of minimumSize bytes:
        //   0 * minAlignment =  0
        //   1 * minAlignment =  4
        //   2 * minAlignment =  8
        //   3 * minAlignment = 12
        // Following this logic, the additional bytes needed are
        //   (maxAlignment/minAlignment - 1) * minAlignment
        //     = maxAlignment - minAlignment.
        int minimumSize = AlignUp(requestedSize, minAlignment)
                          + blockSize
                          + maxAlignment - minAlignment;

        // If minimumSize is >= 32K, round it up to a 4K boundary unless that would get too close
        // to the maximum int. The 32K heuristic follows JEMalloc's behavior.
        constexpr int k32K = (1 << 15);
        if (minimumSize >= k32K && minimumSize < std::numeric_limits<int>::max() - k4K) {
            minimumSize = AlignUp(minimumSize, k4K);
        }

        return minimumSize;
    }
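
    // Worked example (an added illustration; assumes sizeof(Block) == 16 and kMaxAlignment == 16,
    // as on a typical 64-bit build):
    //   MinimumSizeWithOverhead(10, /*assumedAlignment=*/4, /*blockSize=*/16, /*maxAlignment=*/16)
    //     = AlignUp(10, 4) + 16 + (16 - 4)
    //     = 12 + 16 + 12
    //     = 40,
    //   which is below the 32K threshold and is returned unchanged.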

    template <int size>
    using Storage = std::array<char, PlatformMinimumSizeWithOverhead(size, 1)>;

    // Returns a pointer to memory suitable for holding n Ts.
    template <typename T> char* allocateBytesFor(int n = 1) {
        static_assert(alignof(T) <= kMaxAlignment, "Alignment is too big for arena");
        static_assert(sizeof(T) < kMaxByteSize, "Size is too big for arena");
        constexpr int kMaxN = kMaxByteSize / sizeof(T);
        SkASSERT_RELEASE(0 <= n && n < kMaxN);

        int size = n ? n * sizeof(T) : 1;
        return this->allocateBytes(size, alignof(T));
    }

    void* alignedBytes(int unsafeSize, int unsafeAlignment);

private:
    // The maximum alignment supported by GrBagOfBytes. 16 seems to be a good number for alignment.
    // If a use case for larger alignments is found, we can turn this into a template parameter.
    inline static constexpr int kMaxAlignment = std::max(16, (int)alignof(std::max_align_t));
    // The largest size that can be allocated. At larger sizes, the block is rounded up to 4K
    // chunks. Leave 4K of slop.
    inline static constexpr int k4K = (1 << 12);
    // This should never overflow with the calculations done in the code.
    inline static constexpr int kMaxByteSize = std::numeric_limits<int>::max() - k4K;
    // The assumed alignment of new char[] given the platform.
    // There is a bug in Emscripten's allocator that makes alignment different from max_align_t.
    // kAllocationAlignment accounts for this difference. For more information see:
    // https://github.com/emscripten-core/emscripten/issues/10072
    #if !defined(SK_FORCE_8_BYTE_ALIGNMENT)
        inline static constexpr int kAllocationAlignment = alignof(std::max_align_t);
    #else
        inline static constexpr int kAllocationAlignment = 8;
    #endif

    // Round size up to the next multiple of alignment. The alignment must be a power of two for
    // the mask trick to hold; e.g. AlignUp(10, 8) = (10 + 7) & -8 = 16.
    static constexpr size_t AlignUp(int size, int alignment) {
        return (size + (alignment - 1)) & -alignment;
    }

    // The Block starts at the location pointed to by fEndByte.
    // Beware. Order is important here. The destructor for fPrevious must be called first because
    // the Block is embedded in fBlockStart. Destructors are run in reverse order.
    struct Block {
        Block(char* previous, char* startOfBlock);
        // The start of the originally allocated bytes. This is the thing that must be deleted.
        char* const fBlockStart;
        Block* const fPrevious;
    };

    // Note: fCapacity is the number of bytes remaining, and is subtracted from fEndByte to
    // generate the location of the object.
    char* allocateBytes(int size, int alignment) {
        fCapacity = fCapacity & -alignment;
        if (fCapacity < size) {
            this->needMoreBytes(size, alignment);
        }
        char* const ptr = fEndByte - fCapacity;
        SkASSERT(((intptr_t)ptr & (alignment - 1)) == 0);
        SkASSERT(fCapacity >= size);
        fCapacity -= size;
        return ptr;
    }
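
    // Walk-through (an added illustration; fEndByte sits at a kMaxAlignment (16-byte) aligned
    // address): with fCapacity = 100, a request for size = 24 at alignment = 8 first rounds the
    // capacity down, 100 & -8 = 96, so ptr = fEndByte - 96 is 8-byte aligned; the request fits,
    // and fCapacity becomes 96 - 24 = 72 for the next allocation.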

    // Adjust fEndByte and fCapacity for a new block starting at bytes with the given size.
    void setupBytesAndCapacity(char* bytes, int size);

    // Adjust fEndByte and fCapacity to satisfy the size and alignment request.
    void needMoreBytes(int size, int alignment);

    // This points to the highest kMaxAlignment-aligned address in the allocated block. The
    // address of the current end of allocated data is given by fEndByte - fCapacity. The bytes
    // below this pointer are the ones handed out to allocations, while the bytes at and above it
    // hold the Block for this memory. In other words, the pointer to the Block structure for
    // these bytes is reinterpret_cast<Block*>(fEndByte).
    char* fEndByte{nullptr};

    // The number of bytes remaining in this block.
    int fCapacity{0};

    SkFibBlockSizes<kMaxByteSize> fFibProgression;
};
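
// A usage sketch for GrBagOfBytes (an added illustration, not part of the original header): a
// caller can embed a Storage<N> buffer and pass it to the constructor so that small workloads
// stay on the stack, falling back to heap blocks only when the inline bytes run out.
//
//     GrBagOfBytes::Storage<64> storage;
//     GrBagOfBytes bag{storage.data(), storage.size(), /*firstHeapAllocation=*/1024};
//     char* bytes = bag.allocateBytesFor<int>(4);  // space for four ints, aligned for int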

// GrSubRunAllocator provides fast allocation where the user takes care of calling the destructors
// of the returned pointers, and GrSubRunAllocator takes care of deleting the storage. The
// unique_ptrs returned are there to assist in assuring the object's destructor is called.
// A note on zero-length arrays: according to the standard a pointer must be returned, and it
// can't be a nullptr. In such a case, SkArena allocates one byte, but does not initialize it.
class GrSubRunAllocator {
public:
    struct Destroyer {
        template <typename T>
        void operator()(T* ptr) { ptr->~T(); }
    };

    struct ArrayDestroyer {
        int n;
        template <typename T>
        void operator()(T* ptr) {
            for (int i = 0; i < n; i++) { ptr[i].~T(); }
        }
    };

    template<class T>
    inline static constexpr bool HasNoDestructor = std::is_trivially_destructible<T>::value;

    GrSubRunAllocator(char* block, int blockSize, int firstHeapAllocation);
    explicit GrSubRunAllocator(int firstHeapAllocation = 0);

    template <typename T, typename... Args> T* makePOD(Args&&... args) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUnique.");
        char* bytes = fAlloc.template allocateBytesFor<T>();
        return new (bytes) T(std::forward<Args>(args)...);
    }

    template <typename T, typename... Args>
    std::unique_ptr<T, Destroyer> makeUnique(Args&&... args) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePOD.");
        char* bytes = fAlloc.template allocateBytesFor<T>();
        return std::unique_ptr<T, Destroyer>{new (bytes) T(std::forward<Args>(args)...)};
    }

    template<typename T> T* makePODArray(int n) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUniqueArray.");
        return reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(n));
    }

    template<typename T, typename Src, typename Map>
    SkSpan<T> makePODArray(const Src& src, Map map) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUniqueArray.");
        int size = SkTo<int>(src.size());
        T* result = this->template makePODArray<T>(size);
        for (int i = 0; i < size; i++) {
            new (&result[i]) T(map(src[i]));
        }
        return {result, src.size()};
    }

    template<typename T>
    std::unique_ptr<T[], ArrayDestroyer> makeUniqueArray(int n) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePODArray.");
        T* array = reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(n));
        for (int i = 0; i < n; i++) {
            new (&array[i]) T{};
        }
        return std::unique_ptr<T[], ArrayDestroyer>{array, ArrayDestroyer{n}};
    }

    template<typename T, typename I>
    std::unique_ptr<T[], ArrayDestroyer> makeUniqueArray(int n, I initializer) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePODArray.");
        T* array = reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(n));
        for (int i = 0; i < n; i++) {
            new (&array[i]) T(initializer(i));
        }
        return std::unique_ptr<T[], ArrayDestroyer>{array, ArrayDestroyer{n}};
    }

    void* alignedBytes(int size, int alignment);

private:
    GrBagOfBytes fAlloc;
};
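
// A usage sketch for GrSubRunAllocator (an added illustration; Widget is a hypothetical type with
// a non-trivial destructor): trivially destructible data goes through the makePOD variants, while
// types with destructors come back wrapped in unique_ptrs so their destructors still run even
// though the arena itself only reclaims raw storage.
//
//     GrSubRunAllocator alloc{/*firstHeapAllocation=*/4096};
//
//     struct Simple { int x, y; };                  // trivially destructible
//     Simple* s    = alloc.makePOD<Simple>();
//     int*    ints = alloc.makePODArray<int>(8);
//
//     auto widget  = alloc.makeUnique<Widget>(42);
//     auto widgets = alloc.makeUniqueArray<Widget>(4, [](int i) { return Widget(i); });
//
//     // widget and widgets run ~Widget when they go out of scope; the underlying bytes are
//     // reclaimed when alloc is destroyed.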
#endif  // GrSubRunAllocator_DEFINED