/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTBlockList_DEFINED
#define GrTBlockList_DEFINED

#include "src/gpu/GrBlockAllocator.h"

#include <type_traits>

// Forward declarations for the iterators used by GrTBlockList
using IndexFn = int (*)(const GrBlockAllocator::Block*);
using NextFn = int (*)(const GrBlockAllocator::Block*, int);
template<typename T, typename B> using ItemFn = T (*)(B*, int);
template <typename T, bool Forward, bool Const, IndexFn Start, IndexFn End, NextFn Next,
          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
                                                     GrBlockAllocator::Block>::type> Resolve>
class BlockIndexIterator;

/**
 * GrTBlockList manages dynamic storage for instances of T, reserving fixed blocks such that
 * allocation is amortized across every N instances. In this way it is a hybrid of an array-based
 * vector and a linked list. T can be any type and non-trivial destructors are automatically
 * invoked when the GrTBlockList is destructed. The addresses of instances are guaranteed
 * not to move except when a list is concatenated to another.
 *
 * The collection supports storing a templated number of elements inline before heap-allocated
 * blocks are made to hold additional instances. By default, the heap blocks are sized to hold the
 * same number of items as the inline block. A common pattern is to have the inline size hold only
 * a small number of items for the common case and then allocate larger blocks when needed.
 *
 * If the size of a collection is N, and its block size is B, the complexities of the common
 * operations are:
 *  - push_back()/emplace_back(): O(1), with malloc O(B)
 *  - pop_back(): O(1), with free O(B)
 *  - front()/back(): O(1)
 *  - reset(): O(N) for non-trivial types, O(N/B) for trivial types
 *  - concat(): O(B)
 *  - random access: O(N/B)
 *  - iteration: O(1) at each step
 *
 * These characteristics make it well suited for allocating items in a LIFO ordering, or otherwise
 * acting as a stack, or simply using it as a typed allocator.
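 *
 * A minimal usage sketch (the element type and inline count here are illustrative):
 *
 *   GrTBlockList<int, 4> list;       // space for 4 ints reserved inline
 *   list.push_back(1);
 *   list.emplace_back(2);
 *   for (int v : list.items()) {}    // visits 1 then 2 (oldest to newest)
 *   list.reset();                    // destroys all items and releases heap blocks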
 */
template <typename T, int StartingItems = 1>
class GrTBlockList {
public:
    /**
     * Create an allocator that defaults to using StartingItems as heap increment.
     */
    GrTBlockList() : GrTBlockList(StartingItems) {}

    /**
     * Create an allocator
     *
     * @param itemsPerBlock the number of items to allocate at once
     */
    explicit GrTBlockList(int itemsPerBlock,
                          GrBlockAllocator::GrowthPolicy policy =
                                  GrBlockAllocator::GrowthPolicy::kFixed)
            : fAllocator(policy,
                         GrBlockAllocator::BlockOverhead<alignof(T)>() + sizeof(T)*itemsPerBlock) {}

    ~GrTBlockList() { this->reset(); }

    /**
     * Adds an item and returns it.
     *
     * @return the added item.
     */
    T& push_back() {
        return *new (this->pushItem()) T;
    }
    T& push_back(const T& t) {
        return *new (this->pushItem()) T(t);
    }
    T& push_back(T&& t) {
        return *new (this->pushItem()) T(std::move(t));
    }

    template <typename... Args>
    T& emplace_back(Args&&... args) {
        return *new (this->pushItem()) T(std::forward<Args>(args)...);
    }

    /**
     * Move all items from 'other' to the end of this collection. When this returns, 'other' will
     * be empty. Items in 'other' may be moved as part of compacting the pre-allocated start of
     * 'other' into this list (using T's move constructor or memcpy if T is trivially copyable), but
     * this is O(StartingItems) and not O(N). All other items are concatenated in O(1).
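     *
     * A usage sketch ('Foo' is an illustrative element type):
     *
     *   GrTBlockList<Foo> a, b;
     *   a.push_back(); b.push_back(); b.push_back();
     *   a.concat(std::move(b)); // 'a' now holds all three items and 'b' is empty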
     */
    template <int SI>
    void concat(GrTBlockList<T, SI>&& other);

    /**
     * Allocate, if needed, space to hold N more Ts before another malloc will occur.
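     *
     * For example, after list.reserve(10), the next 10 push_back()/emplace_back() calls are
     * guaranteed not to trigger a new block allocation.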
     */
    void reserve(int n) {
        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (n > avail) {
            int reserved = n - avail;
            // Don't consider existing bytes since we've already determined how to split the N items
            fAllocator->template reserve<alignof(T)>(
                    reserved * sizeof(T), GrBlockAllocator::kIgnoreExistingBytes_Flag);
        }
    }

    /**
     * Remove the last item; only call if count() != 0.
     */
    void pop_back() {
        SkASSERT(this->count() > 0);

        GrBlockAllocator::Block* block = fAllocator->currentBlock();

        // Run dtor for the popped item
        int releaseIndex = Last(block);
        GetItem(block, releaseIndex).~T();

        if (releaseIndex == First(block)) {
            fAllocator->releaseBlock(block);
        } else {
            // Since this always follows LIFO, the block should always be able to release the memory
            SkAssertResult(block->release(releaseIndex, releaseIndex + sizeof(T)));
            block->setMetadata(Decrement(block, releaseIndex));
        }

        fAllocator->setMetadata(fAllocator->metadata() - 1);
    }

    /**
     * Removes all added items.
     */
    void reset() {
        // Invoke destructors in reverse order if not trivially destructible
        if constexpr (!std::is_trivially_destructible<T>::value) {
            for (T& t : this->ritems()) {
                t.~T();
            }
        }

        fAllocator->reset();
    }

    /**
     * Returns the item count.
     */
    int count() const {
#ifdef SK_DEBUG
        // Confirm total count matches sum of block counts
        int count = 0;
        for (const auto* b : fAllocator->blocks()) {
            if (b->metadata() == 0) {
                continue; // skip empty
            }
            count += (sizeof(T) + Last(b) - First(b)) / sizeof(T);
        }
        SkASSERT(count == fAllocator->metadata());
#endif
        return fAllocator->metadata();
    }

    /**
     * Is the count 0?
     */
    bool empty() const { return this->count() == 0; }

    /**
     * Access first item; only call if count() != 0.
     */
    T& front() {
        // This assumes that the head block actually has room to store the first item.
        static_assert(StartingItems >= 1);
        SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
    }
    const T& front() const {
        SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
    }

    /**
     * Access last item; only call if count() != 0.
     */
    T& back() {
        SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
    }
    const T& back() const {
        SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
    }

    /**
     * Access item by index. Not an operator[] since it should not be considered constant time.
     * Use for-range loops by calling items() or ritems() instead to access all added items in
     * order.
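     *
     * For example, list.item(3) visits blocks in order until it finds the fourth-oldest item,
     * so random access is O(N/B) rather than O(1).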
     */
    T& item(int i) {
        SkASSERT(i >= 0 && i < this->count());

        // Iterate over blocks until we find the one that contains i.
        for (auto* b : fAllocator->blocks()) {
            if (b->metadata() == 0) {
                continue; // skip empty
            }

            int start = First(b);
            int end = Last(b) + sizeof(T); // exclusive
            int index = start + i * sizeof(T);
            if (index < end) {
                return GetItem(b, index);
            } else {
                i -= (end - start) / sizeof(T);
            }
        }
        SkUNREACHABLE;
    }
    const T& item(int i) const {
        return const_cast<GrTBlockList*>(this)->item(i);
    }

private:
    // Let other GrTBlockLists have access (only ever used when T and S are the same, but you
    // cannot have partial specializations declared as a friend...)
    template<typename S, int N> friend class GrTBlockList;

    static constexpr size_t StartingSize =
            GrBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);

    static T& GetItem(GrBlockAllocator::Block* block, int index) {
        return *static_cast<T*>(block->ptr(index));
    }
    static const T& GetItem(const GrBlockAllocator::Block* block, int index) {
        return *static_cast<const T*>(block->ptr(index));
    }
    static int First(const GrBlockAllocator::Block* b) {
        return b->firstAlignedOffset<alignof(T)>();
    }
    static int Last(const GrBlockAllocator::Block* b) {
        return b->metadata();
    }
    static int Increment(const GrBlockAllocator::Block* b, int index) {
        return index + sizeof(T);
    }
    static int Decrement(const GrBlockAllocator::Block* b, int index) {
        return index - sizeof(T);
    }

    void* pushItem() {
        // 'template' required because fAllocator is a template, calling a template member
        auto br = fAllocator->template allocate<alignof(T)>(sizeof(T));
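        // Alignment padding should only occur for the first item in a block; otherwise items
        // pack back-to-back, so the allocation is expected to start exactly at fStart.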
        SkASSERT(br.fStart == br.fAlignedOffset ||
                 br.fAlignedOffset == First(fAllocator->currentBlock()));
        br.fBlock->setMetadata(br.fAlignedOffset);
        fAllocator->setMetadata(fAllocator->metadata() + 1);
        return br.fBlock->ptr(br.fAlignedOffset);
    }

    // N represents the number of items, whereas GrSBlockAllocator takes total bytes, so we must
    // account for the block allocator's overhead too.
    //
    // This class uses the GrBlockAllocator's metadata to track the total count of items, and the
    // per-block metadata to track the index of the last allocated item within each block.
    GrSBlockAllocator<StartingSize> fAllocator;

public:
    using Iter   = BlockIndexIterator<T&, true, false, &First, &Last, &Increment, &GetItem>;
    using CIter  = BlockIndexIterator<const T&, true, true, &First, &Last, &Increment, &GetItem>;
    using RIter  = BlockIndexIterator<T&, false, false, &Last, &First, &Decrement, &GetItem>;
    using CRIter = BlockIndexIterator<const T&, false, true, &Last, &First, &Decrement, &GetItem>;

    /**
     * Iterate over all items in allocation order (oldest to newest) using a for-range loop:
     *
     *   for (auto&& t : this->items()) {}
     */
    Iter items() { return Iter(fAllocator.allocator()); }
    CIter items() const { return CIter(fAllocator.allocator()); }

    // Iterate from newest to oldest using a for-range loop.
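    // For example:
    //
    //   for (auto&& t : this->ritems()) {}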
    RIter ritems() { return RIter(fAllocator.allocator()); }
    CRIter ritems() const { return CRIter(fAllocator.allocator()); }

#if GR_TEST_UTILS
    // For introspection
    const GrBlockAllocator* allocator() const { return fAllocator.allocator(); }
#endif
};

template <typename T, int SI1>
template <int SI2>
void GrTBlockList<T, SI1>::concat(GrTBlockList<T, SI2>&& other) {
    // Optimize the common case where the list to append only has a single item
    if (other.empty()) {
        return;
    } else if (other.count() == 1) {
        this->push_back(other.back());
        other.pop_back();
        return;
    }

    // Manually move all items in other's head block into this list; all heap blocks from 'other'
    // will be appended to the block linked list (no per-item moves needed then).
    int headItemCount = 0;
    GrBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
    SkDEBUGCODE(int oldCount = this->count();)
    if (headBlock->metadata() > 0) {
        int headStart = First(headBlock);
        int headEnd = Last(headBlock) + sizeof(T); // exclusive
        headItemCount = (headEnd - headStart) / sizeof(T);
        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (headItemCount > avail) {
            // Make sure there is extra room for the items beyond what's already available. Use
            // the kIgnoreGrowthPolicy_Flag to make this reservation as tight as possible since
            // 'other's heap blocks will be appended after it and any extra space is wasted.
            fAllocator->template reserve<alignof(T)>((headItemCount - avail) * sizeof(T),
                                                     GrBlockAllocator::kIgnoreExistingBytes_Flag |
                                                     GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
        }

        if constexpr (std::is_trivially_copy_constructible<T>::value) {
            // memcpy all items at once (or twice, split between the current and reserved space).
            SkASSERT(std::is_trivially_destructible<T>::value);
            auto copy = [](GrBlockAllocator::Block* src, int start, GrBlockAllocator* dst, int n) {
                auto target = dst->template allocate<alignof(T)>(n * sizeof(T));
                memcpy(target.fBlock->ptr(target.fAlignedOffset), src->ptr(start), n * sizeof(T));
                target.fBlock->setMetadata(target.fAlignedOffset + (n - 1) * sizeof(T));
            };

            if (avail > 0) {
                // Copy 0 to avail items into the existing tail block
                copy(headBlock, headStart, fAllocator.allocator(), std::min(headItemCount, avail));
            }
            if (headItemCount > avail) {
                // Copy (head count - avail) into the extra reserved space
                copy(headBlock, headStart + avail * sizeof(T),
                     fAllocator.allocator(), headItemCount - avail);
            }
            fAllocator->setMetadata(fAllocator->metadata() + headItemCount);
        } else {
            // Move every item over one at a time
            for (int i = headStart; i < headEnd; i += sizeof(T)) {
                T& toMove = GetItem(headBlock, i);
                this->push_back(std::move(toMove));
                // Anything of interest should have been moved, but run this since T isn't
                // a trusted type.
                toMove.~T(); // NOLINT(bugprone-use-after-move): calling dtor always allowed
            }
        }

        other.fAllocator->releaseBlock(headBlock);
    }

    // other's head block must have been fully copied since it cannot be stolen
    SkASSERT(other.fAllocator->headBlock()->metadata() == 0 &&
             fAllocator->metadata() == oldCount + headItemCount);
    fAllocator->stealHeapBlocks(other.fAllocator.allocator());
    fAllocator->setMetadata(fAllocator->metadata() +
                            (other.fAllocator->metadata() - headItemCount));
    other.fAllocator->setMetadata(0);
}

/**
 * BlockIndexIterator provides a reusable iterator template for collections built on top of a
 * GrBlockAllocator, where each item is of the same type, and the index to an item can be iterated
 * over in a known manner. It supports const and non-const, and forward and reverse iteration,
 * assuming it's provided with proper functions for starting, ending, and advancing.
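 *
 * For example, GrTBlockList instantiates its forward iterator as:
 *
 *   using Iter = BlockIndexIterator<T&, true, false, &First, &Last, &Increment, &GetItem>;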
 */
template <typename T,    // The element type (including any modifiers)
          bool Forward,  // Are indices within a block increasing or decreasing with iteration?
          bool Const,    // Whether or not T is const
          IndexFn Start, // Returns the index of the first valid item in a block
          IndexFn End,   // Returns the index of the last valid item (so it is inclusive)
          NextFn Next,   // Returns the next index given the current index
          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
                                                     GrBlockAllocator::Block>::type> Resolve>
class BlockIndexIterator {
    using BlockIter = typename GrBlockAllocator::BlockIter<Forward, Const>;
public:
    BlockIndexIterator(BlockIter iter) : fBlockIter(iter) {}

    class Item {
    public:
        bool operator!=(const Item& other) const {
            return other.fBlock != fBlock || (SkToBool(*fBlock) && other.fIndex != fIndex);
        }

        T operator*() const {
            SkASSERT(*fBlock);
            return Resolve(*fBlock, fIndex);
        }

        Item& operator++() {
            const auto* block = *fBlock;
            SkASSERT(block && block->metadata() > 0);
            SkASSERT((Forward && Next(block, fIndex) > fIndex) ||
                     (!Forward && Next(block, fIndex) < fIndex));
            fIndex = Next(block, fIndex);
            if ((Forward && fIndex > fEndIndex) || (!Forward && fIndex < fEndIndex)) {
                ++fBlock;
                this->setIndices();
            }
            return *this;
        }

    private:
        friend BlockIndexIterator;
        using BlockItem = typename BlockIter::Item;

        Item(BlockItem block) : fBlock(block) {
            this->setIndices();
        }

        void setIndices() {
            // Skip empty blocks
            while (*fBlock && (*fBlock)->metadata() == 0) {
                ++fBlock;
            }
            if (*fBlock) {
                fIndex = Start(*fBlock);
                fEndIndex = End(*fBlock);
            } else {
                fIndex = 0;
                fEndIndex = 0;
            }

            SkASSERT((Forward && fIndex <= fEndIndex) || (!Forward && fIndex >= fEndIndex));
        }

        BlockItem fBlock;
        int fIndex;
        int fEndIndex;
    };

    Item begin() const { return Item(fBlockIter.begin()); }
    Item end() const { return Item(fBlockIter.end()); }

private:
    BlockIter fBlockIter;
};

#endif