/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #ifndef SkTBlockList_DEFINED
9 #define SkTBlockList_DEFINED
10
11 #include "src/core/SkBlockAllocator.h"
12
13 #include <type_traits>
14
// Forward declarations for the iterators used by SkTBlockList.
// Item positions are byte offsets within an SkBlockAllocator::Block.
// Maps a block to a byte offset (e.g. the first or last item held in the block).
using IndexFn = int (*)(const SkBlockAllocator::Block*);
// Steps a byte offset to the next item's offset within a block (e.g. +/- sizeof(T)).
using NextFn = int (*)(const SkBlockAllocator::Block*, int);
// Resolves a (block, byte offset) pair to a reference to the stored item.
template<typename T, typename B> using ItemFn = T (*)(B*, int);
template <typename T, bool Forward, bool Const, IndexFn Start, IndexFn End, NextFn Next,
          ItemFn<T, typename std::conditional<Const, const SkBlockAllocator::Block,
                                              SkBlockAllocator::Block>::type> Resolve>
class BlockIndexIterator;
23
/**
 * SkTBlockList manages dynamic storage for instances of T, reserving fixed blocks such that
 * allocation is amortized across every N instances. In this way it is a hybrid of an array-based
 * vector and a linked-list. T can be any type and non-trivial destructors are automatically
 * invoked when the SkTBlockList is destructed. The addresses of instances are guaranteed
 * not to move except when a list is concatenated to another.
 *
 * The collection supports storing a templated number of elements inline before heap-allocated
 * blocks are made to hold additional instances. By default, the heap blocks are sized to hold the
 * same number of items as the inline block. A common pattern is to have the inline size hold only
 * a small number of items for the common case and then allocate larger blocks when needed.
 *
 * If the size of a collection is N, and its block size is B, the complexity of the common
 * operations are:
 *  - push_back()/emplace_back(): O(1), with malloc O(B)
 *  - pop_back(): O(1), with free O(B)
 *  - front()/back(): O(1)
 *  - reset(): O(N) for non-trivial types, O(N/B) for trivial types
 *  - concat(): O(B)
 *  - random access: O(N/B)
 *  - iteration: O(1) at each step
 *
 * These characteristics make it well suited for allocating items in a LIFO ordering, or otherwise
 * acting as a stack, or simply using it as a typed allocator.
 *
 * Implementation note: the SkBlockAllocator's user metadata slot stores the total item count,
 * and each block's metadata slot stores the byte offset of the last item placed in that block
 * (a metadata of 0 marks a block that holds no items).
 */
template <typename T, int StartingItems = 1>
class SkTBlockList {
public:
    /**
     * Create an allocator that defaults to using StartingItems as heap increment.
     */
    SkTBlockList() : SkTBlockList(StartingItems) {}

    /**
     * Create an allocator
     *
     * @param itemsPerBlock the number of items to allocate at once
     * @param policy        how subsequent heap block sizes grow (fixed by default)
     */
    explicit SkTBlockList(int itemsPerBlock,
                          SkBlockAllocator::GrowthPolicy policy =
                                  SkBlockAllocator::GrowthPolicy::kFixed)
            : fAllocator(policy,
                         SkBlockAllocator::BlockOverhead<alignof(T)>() + sizeof(T)*itemsPerBlock) {}

    // Runs the destructor of every remaining item (see reset()).
    ~SkTBlockList() { this->reset(); }

    /**
     * Adds an item and returns it.
     *
     * @return the added item.
     */
    T& push_back() {
        return *new (this->pushItem()) T;
    }
    T& push_back(const T& t) {
        return *new (this->pushItem()) T(t);
    }
    T& push_back(T&& t) {
        return *new (this->pushItem()) T(std::move(t));
    }

    // Constructs a new item in place from 'args' and returns it.
    template <typename... Args>
    T& emplace_back(Args&&... args) {
        return *new (this->pushItem()) T(std::forward<Args>(args)...);
    }

    /**
     * Move all items from 'other' to the end of this collection. When this returns, 'other' will
     * be empty. Items in 'other' may be moved as part of compacting the pre-allocated start of
     * 'other' into this list (using T's move constructor or memcpy if T is trivially copyable), but
     * this is O(StartingItems) and not O(N). All other items are concatenated in O(1).
     */
    template <int SI>
    void concat(SkTBlockList<T, SI>&& other);

    /**
     * Allocate, if needed, space to hold N more Ts before another malloc will occur.
     */
    void reserve(int n) {
        // Only the current (tail) block can accept new items without a fresh allocation.
        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (n > avail) {
            int reserved = n - avail;
            // Don't consider existing bytes since we've already determined how to split the N items
            fAllocator->template reserve<alignof(T)>(
                    reserved * sizeof(T), SkBlockAllocator::kIgnoreExistingBytes_Flag);
        }
    }

    /**
     * Remove the last item, only call if count() != 0
     */
    void pop_back() {
        SkASSERT(this->count() > 0);

        SkBlockAllocator::Block* block = fAllocator->currentBlock();

        // Run dtor for the popped item
        int releaseIndex = Last(block);
        GetItem(block, releaseIndex).~T();

        if (releaseIndex == First(block)) {
            // The popped item was the only one left in its block, so free the whole block.
            fAllocator->releaseBlock(block);
        } else {
            // Since this always follows LIFO, the block should always be able to release the memory
            SkAssertResult(block->release(releaseIndex, releaseIndex + sizeof(T)));
            // Rewind the block's last-item offset to the previous item.
            block->setMetadata(Decrement(block, releaseIndex));
        }

        // Decrement the total item count stored in the allocator's metadata.
        fAllocator->setMetadata(fAllocator->metadata() - 1);
    }

    /**
     * Removes all added items.
     */
    void reset() {
        // Invoke destructors in reverse order if not trivially destructible
        if constexpr (!std::is_trivially_destructible<T>::value) {
            for (T& t : this->ritems()) {
                t.~T();
            }
        }

        // Releases all blocks; this also zeroes the item count held in the metadata.
        fAllocator->reset();
    }

    /**
     * Returns the item count.
     */
    int count() const {
#ifdef SK_DEBUG
        // Confirm total count matches sum of block counts
        int count = 0;
        for (const auto* b :fAllocator->blocks()) {
            if (b->metadata() == 0) {
                continue; // skip empty
            }
            // Last() is the inclusive offset of the final item, so add sizeof(T) to form an
            // exclusive byte range before converting bytes to an item count.
            count += (sizeof(T) + Last(b) - First(b)) / sizeof(T);
        }
        SkASSERT(count == fAllocator->metadata());
#endif
        return fAllocator->metadata();
    }

    /**
     * Is the count 0?
     */
    bool empty() const { return this->count() == 0; }

    /**
     * Access first item, only call if count() != 0
     */
    T& front() {
        // This assumes that the head block actually have room to store the first item.
        static_assert(StartingItems >= 1);
        SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
    }
    const T& front() const {
        SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
    }

    /**
     * Access last item, only call if count() != 0
     */
    T& back() {
        SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
    }
    const T& back() const {
        SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
    }

    /**
     * Access item by index. Not an operator[] since it should not be considered constant time.
     * Use for-range loops by calling items() or ritems() instead to access all added items in order
     */
    T& item(int i) {
        SkASSERT(i >= 0 && i < this->count());

        // Iterate over blocks until we find the one that contains i.
        for (auto* b : fAllocator->blocks()) {
            if (b->metadata() == 0) {
                continue; // skip empty
            }

            int start = First(b);
            int end = Last(b) + sizeof(T); // exclusive
            int index = start + i * sizeof(T);
            if (index < end) {
                return GetItem(b, index);
            } else {
                // 'i' is past this block; subtract this block's item count and keep looking.
                i -= (end - start) / sizeof(T);
            }
        }
        SkUNREACHABLE;
    }
    const T& item(int i) const {
        return const_cast<SkTBlockList*>(this)->item(i);
    }

private:
    // Let other SkTBlockLists have access (only ever used when T and S are the same but you
    // cannot have partial specializations declared as a friend...)
    template<typename S, int N> friend class SkTBlockList;
    friend class TBlockListTestAccess;  // for fAllocator

    // Total bytes for the inline head block: allocator overhead plus StartingItems T's.
    inline static constexpr size_t StartingSize =
            SkBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);

    // Resolves a byte offset within 'block' to a reference to the T stored there.
    static T& GetItem(SkBlockAllocator::Block* block, int index) {
        return *static_cast<T*>(block->ptr(index));
    }
    static const T& GetItem(const SkBlockAllocator::Block* block, int index) {
        return *static_cast<const T*>(block->ptr(index));
    }
    // Byte offset where the first item of 'b' is (or would be) stored.
    static int First(const SkBlockAllocator::Block* b) {
        return b->firstAlignedOffset<alignof(T)>();
    }
    // Byte offset of the last item stored in 'b' (tracked in the block's metadata).
    static int Last(const SkBlockAllocator::Block* b) {
        return b->metadata();
    }
    // Offset of the next/previous item. The Block parameter is unused but required so these
    // match the NextFn signature used by BlockIndexIterator.
    static int Increment(const SkBlockAllocator::Block* b, int index) {
        return index + sizeof(T);
    }
    static int Decrement(const SkBlockAllocator::Block* b, int index) {
        return index - sizeof(T);
    }

    // Allocates aligned, uninitialized storage for one T and updates both metadata slots:
    // the owning block's last-item offset and the allocator's total item count.
    void* pushItem() {
        // 'template' required because fAllocator is a template, calling a template member
        auto br = fAllocator->template allocate<alignof(T)>(sizeof(T));
        // The item should be packed tightly: either no alignment padding was inserted, or the
        // item landed at the first aligned slot of the current block.
        SkASSERT(br.fStart == br.fAlignedOffset ||
                 br.fAlignedOffset == First(fAllocator->currentBlock()));
        br.fBlock->setMetadata(br.fAlignedOffset);
        fAllocator->setMetadata(fAllocator->metadata() + 1);
        return br.fBlock->ptr(br.fAlignedOffset);
    }

    // N represents the number of items, whereas SkSBlockAllocator takes total bytes, so must
    // account for the block allocator's size too.
    //
    // This class uses the SkBlockAllocator's metadata to track total count of items, and per-block
    // metadata to track the index of the last allocated item within each block.
    SkSBlockAllocator<StartingSize> fAllocator;

public:
    // Forward and reverse iterators, in const and non-const flavors. Forward iteration starts at
    // First() and Increments; reverse starts at Last() and Decrements.
    using Iter = BlockIndexIterator<T&, true, false, &First, &Last, &Increment, &GetItem>;
    using CIter = BlockIndexIterator<const T&, true, true, &First, &Last, &Increment, &GetItem>;
    using RIter = BlockIndexIterator<T&, false, false, &Last, &First, &Decrement, &GetItem>;
    using CRIter = BlockIndexIterator<const T&, false, true, &Last, &First, &Decrement, &GetItem>;

    /**
     * Iterate over all items in allocation order (oldest to newest) using a for-range loop:
     *
     *   for (auto&& T : this->items()) {}
     */
    Iter items() { return Iter(fAllocator.allocator()); }
    CIter items() const { return CIter(fAllocator.allocator()); }

    // Iterate from newest to oldest using a for-range loop.
    RIter ritems() { return RIter(fAllocator.allocator()); }
    CRIter ritems() const { return CRIter(fAllocator.allocator()); }
};
289
// Appends all of 'other' onto this list. Heap blocks of 'other' are stolen wholesale; only the
// items living in other's head (inline) block must be copied/moved individually.
template <typename T, int SI1>
template <int SI2>
void SkTBlockList<T, SI1>::concat(SkTBlockList<T, SI2>&& other) {
    // Optimize the common case where the list to append only has a single item
    if (other.empty()) {
        return;
    } else if (other.count() == 1) {
        this->push_back(other.back());
        other.pop_back();
        return;
    }

    // Manually move all items in other's head block into this list; all heap blocks from 'other'
    // will be appended to the block linked list (no per-item moves needed then).
    int headItemCount = 0;
    SkBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
    SkDEBUGCODE(int oldCount = this->count();)
    if (headBlock->metadata() > 0) {
        int headStart = First(headBlock);
        int headEnd = Last(headBlock) + sizeof(T); // exclusive
        headItemCount = (headEnd - headStart) / sizeof(T);
        // How many items still fit in this list's current tail block.
        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (headItemCount > avail) {
            // Make sure there is extra room for the items beyond what's already avail. Use the
            // kIgnoreGrowthPolicy_Flag to make this reservation as tight as possible since
            // 'other's heap blocks will be appended after it and any extra space is wasted.
            fAllocator->template reserve<alignof(T)>((headItemCount - avail) * sizeof(T),
                                                     SkBlockAllocator::kIgnoreExistingBytes_Flag |
                                                     SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
        }

        if constexpr (std::is_trivially_copy_constructible<T>::value) {
            // memcpy all items at once (or twice between current and reserved space).
            SkASSERT(std::is_trivially_destructible<T>::value);
            // Copies 'n' items beginning at byte offset 'start' of 'src' into dst's tail block,
            // then records the offset of the last copied item in that block's metadata.
            auto copy = [](SkBlockAllocator::Block* src, int start, SkBlockAllocator* dst, int n) {
                auto target = dst->template allocate<alignof(T)>(n * sizeof(T));
                memcpy(target.fBlock->ptr(target.fAlignedOffset), src->ptr(start), n * sizeof(T));
                target.fBlock->setMetadata(target.fAlignedOffset + (n - 1) * sizeof(T));
            };

            if (avail > 0) {
                // Copy 0 to avail items into existing tail block
                copy(headBlock, headStart, fAllocator.allocator(), std::min(headItemCount, avail));
            }
            if (headItemCount > avail) {
                // Copy (head count - avail) into the extra reserved space
                copy(headBlock, headStart + avail * sizeof(T),
                     fAllocator.allocator(), headItemCount - avail);
            }
            // 'copy' bypassed pushItem(), so bump the total item count here in one step.
            fAllocator->setMetadata(fAllocator->metadata() + headItemCount);
        } else {
            // Move every item over one at a time
            for (int i = headStart; i < headEnd; i += sizeof(T)) {
                T& toMove = GetItem(headBlock, i);
                this->push_back(std::move(toMove));
                // Anything of interest should have been moved, but run this since T isn't
                // a trusted type.
                toMove.~T(); // NOLINT(bugprone-use-after-move): calling dtor always allowed
            }
        }

        other.fAllocator->releaseBlock(headBlock);
    }

    // other's head block must have been fully copied since it cannot be stolen
    SkASSERT(other.fAllocator->headBlock()->metadata() == 0 &&
             fAllocator->metadata() == oldCount + headItemCount);
    fAllocator->stealHeapBlocks(other.fAllocator.allocator());
    // Account for the items that arrived inside the stolen heap blocks, then mark 'other' empty.
    fAllocator->setMetadata(fAllocator->metadata() +
                            (other.fAllocator->metadata() - headItemCount));
    other.fAllocator->setMetadata(0);
}
362
363 /**
364 * BlockIndexIterator provides a reusable iterator template for collections built on top of a
365 * SkBlockAllocator, where each item is of the same type, and the index to an item can be iterated
366 * over in a known manner. It supports const and non-const, and forward and reverse, assuming it's
367 * provided with proper functions for starting, ending, and advancing.
368 */
369 template <typename T, // The element type (including any modifiers)
370 bool Forward, // Are indices within a block increasing or decreasing with iteration?
371 bool Const, // Whether or not T is const
372 IndexFn Start, // Returns the index of the first valid item in a block
373 IndexFn End, // Returns the index of the last valid item (so it is inclusive)
374 NextFn Next, // Returns the next index given the current index
375 ItemFn<T, typename std::conditional<Const, const SkBlockAllocator::Block,
376 SkBlockAllocator::Block>::type> Resolve>
377 class BlockIndexIterator {
378 using BlockIter = typename SkBlockAllocator::BlockIter<Forward, Const>;
379 public:
BlockIndexIterator(BlockIter iter)380 BlockIndexIterator(BlockIter iter) : fBlockIter(iter) {}
381
382 class Item {
383 public:
384 bool operator!=(const Item& other) const {
385 return other.fBlock != fBlock || (SkToBool(*fBlock) && other.fIndex != fIndex);
386 }
387
388 T operator*() const {
389 SkASSERT(*fBlock);
390 return Resolve(*fBlock, fIndex);
391 }
392
393 Item& operator++() {
394 const auto* block = *fBlock;
395 SkASSERT(block && block->metadata() > 0);
396 SkASSERT((Forward && Next(block, fIndex) > fIndex) ||
397 (!Forward && Next(block, fIndex) < fIndex));
398 fIndex = Next(block, fIndex);
399 if ((Forward && fIndex > fEndIndex) || (!Forward && fIndex < fEndIndex)) {
400 ++fBlock;
401 this->setIndices();
402 }
403 return *this;
404 }
405
406 private:
407 friend BlockIndexIterator;
408 using BlockItem = typename BlockIter::Item;
409
Item(BlockItem block)410 Item(BlockItem block) : fBlock(block) {
411 this->setIndices();
412 }
413
setIndices()414 void setIndices() {
415 // Skip empty blocks
416 while(*fBlock && (*fBlock)->metadata() == 0) {
417 ++fBlock;
418 }
419 if (*fBlock) {
420 fIndex = Start(*fBlock);
421 fEndIndex = End(*fBlock);
422 } else {
423 fIndex = 0;
424 fEndIndex = 0;
425 }
426
427 SkASSERT((Forward && fIndex <= fEndIndex) || (!Forward && fIndex >= fEndIndex));
428 }
429
430 BlockItem fBlock;
431 int fIndex;
432 int fEndIndex;
433 };
434
begin()435 Item begin() const { return Item(fBlockIter.begin()); }
end()436 Item end() const { return Item(fBlockIter.end()); }
437
438 private:
439 BlockIter fBlockIter;
440 };
441
442 #endif
443