/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrMemoryPool_DEFINED
#define GrMemoryPool_DEFINED

#include "src/core/SkBlockAllocator.h"

#ifdef SK_DEBUG
#include "include/private/SkTHash.h"
#endif

/**
 * Allocates memory in blocks and parcels out space in the blocks for allocation requests. It is
 * optimized for allocate / release speed over memory efficiency. The interface is designed to be
 * used to implement operator new and delete overrides. Allocations are aligned to kAlignment
 * (normally alignof(std::max_align_t)).
 *
 * All allocated objects must be released back to the pool before it is destroyed. See the usage
 * sketch below.
 */
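//
// A minimal usage sketch, assuming a hypothetical type SomeOp (not part of this header); each
// allocation is placement-new'd into pool memory and must be destroyed and released before the
// pool itself is destroyed:
//
//     std::unique_ptr<GrMemoryPool> pool = GrMemoryPool::Make(/*preallocSize=*/4096,
//                                                             /*minAllocSize=*/4096);
//     void* mem = pool->allocate(sizeof(SomeOp));  // aligned to GrMemoryPool::kAlignment
//     SomeOp* op = new (mem) SomeOp();             // construct in place
//     // ... use op ...
//     op->~SomeOp();                               // destroy the object
//     pool->release(mem);                          // return its bytes to the pool
//     SkASSERT(pool->isEmpty());                   // every allocation has been released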
class GrMemoryPool {
public:
#ifdef SK_FORCE_8_BYTE_ALIGNMENT
    // https://github.com/emscripten-core/emscripten/issues/10072
    // Since Skia does not use "long double" (16 bytes), we should be ok to force it back to 8 bytes
    // until emscripten is fixed.
    inline static constexpr size_t kAlignment = 8;
#else
    // Guaranteed alignment of pointer returned by allocate().
    inline static constexpr size_t kAlignment = alignof(std::max_align_t);
#endif

    // Smallest block size allocated on the heap (not the smallest reservation via allocate()).
    inline static constexpr size_t kMinAllocationSize = 1 << 10;

    /**
     * Prealloc size is the amount of space to allocate at pool creation time and keep around
     * until pool destruction. The min alloc size is the smallest allowed size of additional
     * allocations. Both sizes are adjusted to ensure that they are at least as large as
     * kMinAllocationSize and less than SkBlockAllocator::kMaxAllocationSize.
     *
     * Both sizes are what the pool will end up allocating from the system, and portions of the
     * allocated memory are used for internal bookkeeping. See the example below.
     */
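    // For example (a sketch of the size adjustment described above, not an exhaustive spec):
    // Make(256, 16) still reserves at least kMinAllocationSize (1 << 10) bytes for the
    // preallocated space and for each additional heap block.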
    static std::unique_ptr<GrMemoryPool> Make(size_t preallocSize, size_t minAllocSize);

    ~GrMemoryPool();
    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Allocates memory. The memory must be freed with release() before the GrMemoryPool is deleted.
     */
    void* allocate(size_t size);
    /**
     * p must have been returned by allocate().
     */
    void release(void* p);
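    // A sketch of the "operator new / delete override" pattern mentioned in the class comment,
    // assuming a hypothetical pooled class and a hypothetical pool-lookup helper (neither is part
    // of this header):
    //
    //     class PooledObject {
    //     public:
    //         void* operator new(size_t size) { return GetThreadLocalPool()->allocate(size); }
    //         void operator delete(void* p) { GetThreadLocalPool()->release(p); }
    //     };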

    /**
     * Returns true if there are no unreleased allocations.
     */
    bool isEmpty() const {
        // If size is the same as preallocSize, there aren't any heap blocks, so currentBlock()
        // is the inline head block.
        return fAllocator.currentBlock() == fAllocator.headBlock() &&
               fAllocator.currentBlock()->metadata() == 0;
    }

    /**
     * In debug mode, this reports the IDs of unfreed nodes via `SkDebugf`. This reporting is also
     * performed automatically whenever a GrMemoryPool is destroyed.
     * In release mode, this method is a no-op.
     */
    void reportLeaks() const;

    /**
     * Returns the total allocated size of the GrMemoryPool minus any preallocated amount
     */
    size_t size() const { return fAllocator.totalSize() - fAllocator.preallocSize(); }

    /**
     * Returns the preallocated size of the GrMemoryPool
     */
    size_t preallocSize() const {
        // Account for the debug-only fields in this count; the offset is 0 for release builds.
        static_assert(std::is_standard_layout<GrMemoryPool>::value, "");
        return offsetof(GrMemoryPool, fAllocator) + fAllocator.preallocSize();
    }

    /**
     * Frees any scratch blocks that are no longer being used.
     */
    void resetScratchSpace() {
        fAllocator.resetScratchSpace();
    }

#ifdef SK_DEBUG
    void validate() const;
#endif

private:
    // Per-allocation overhead so that GrMemoryPool can always identify the block owning each
    // allocation and release all occupied bytes, including any resulting from alignment padding.
    struct Header {
        int fStart;
        int fEnd;
#if defined(SK_DEBUG)
        int fID;       // ID that can be used to track down leaks by clients.
#endif
#if defined(SK_DEBUG) || defined(SK_SANITIZE_ADDRESS)
        int fSentinel; // set to a known value to check for memory stomping; poisoned in ASAN mode
#endif
    };

    GrMemoryPool(size_t preallocSize, size_t minAllocSize);

#ifdef SK_DEBUG
    // preallocSize() uses offsetof, so GrMemoryPool must stay standard layout. Keeping the
    // debug-only state behind a plain pointer avoids depending on SkTHashSet being standard
    // layout. Note that std::unique_ptr may not be standard layout.
    struct Debug {
        SkTHashSet<int>  fAllocatedIDs;
        int              fAllocationCount;
    };
    Debug* fDebug{nullptr};
#endif

    SkBlockAllocator fAllocator; // Must be the last field, in order to use extra allocated space
};
#endif