/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrMemoryPool.h"
#include "SkMalloc.h"
#include "ops/GrOp.h"
#ifdef SK_DEBUG
    #include <atomic>
#endif

#ifdef SK_DEBUG
    #define VALIDATE this->validate()
#else
    #define VALIDATE
#endif

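// GrOpMemoryPool places GrOps in a GrMemoryPool; since the pool itself never runs
// destructors, the op is destroyed explicitly here before its memory is returned.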
void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
    GrOp* tmp = op.release();
    SkASSERT(tmp);
    tmp->~GrOp();
    fMemoryPool.release(tmp);
}

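// Out-of-line definition for the static constexpr member so it can be ODR-used
// (required prior to C++17).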
constexpr size_t GrMemoryPool::kSmallestMinAllocSize;

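// Both sizes are rounded up to kAlignment; minAllocSize is clamped to at least
// kSmallestMinAllocSize and preallocSize to at least minAllocSize. The preallocated head
// block is created eagerly and lives for the life of the pool (release() resets it rather
// than deleting it).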
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
    SkDEBUGCODE(fAllocationCnt = 0);
    SkDEBUGCODE(fAllocBlockCnt = 0);

    minAllocSize = SkTMax<size_t>(GrSizeAlignUp(minAllocSize, kAlignment), kSmallestMinAllocSize);
    preallocSize = SkTMax<size_t>(GrSizeAlignUp(preallocSize, kAlignment), minAllocSize);

    fMinAllocSize = minAllocSize;
    fSize = 0;

    fHead = CreateBlock(preallocSize);
    fTail = fHead;
    fHead->fNext = nullptr;
    fHead->fPrev = nullptr;
    VALIDATE;
}

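// In debug builds the destructor prints up to ten leaked allocation IDs before asserting
// that every allocation has been released and only the (empty) head block remains.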
GrMemoryPool::~GrMemoryPool() {
    VALIDATE;
#ifdef SK_DEBUG
    int i = 0;
    int n = fAllocatedIDs.count();
    fAllocatedIDs.foreach([&i, n] (int32_t id) {
        if (++i == 1) {
            SkDebugf("Leaked IDs (in no particular order): %d", id);
        } else if (i < 11) {
            SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
        } else if (i == 11) {
            SkDebugf(", ...\n");
        }
    });
#endif
    SkASSERT(0 == fAllocationCnt);
    SkASSERT(fHead == fTail);
    SkASSERT(0 == fHead->fLiveCount);
    DeleteBlock(fHead);
}

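// GrMemoryPool hands out raw, aligned storage; callers construct and destroy objects in that
// storage themselves. A minimal usage sketch (illustrative only; "MyType" is a placeholder,
// not a real Skia type):
//
//     GrMemoryPool pool(/*preallocSize=*/4096, /*minAllocSize=*/4096);
//     void* mem = pool.allocate(sizeof(MyType));
//     MyType* obj = new (mem) MyType();   // placement new; the pool never runs constructors
//     obj->~MyType();                     // caller destroys the object...
//     pool.release(mem);                  // ...then returns the memory to the pool
//
// allocate() bumps fCurrPtr within the tail block, appending a new block to the list when the
// tail cannot satisfy the request. Each allocation is preceded by an AllocHeader (the
// kPerAllocPad bytes) that points back at its owning BlockHeader.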
void* GrMemoryPool::allocate(size_t size) {
    VALIDATE;
    size += kPerAllocPad;
    size = GrSizeAlignUp(size, kAlignment);
    if (fTail->fFreeSize < size) {
        size_t blockSize = size + kHeaderSize;
        blockSize = SkTMax<size_t>(blockSize, fMinAllocSize);
        BlockHeader* block = CreateBlock(blockSize);

        block->fPrev = fTail;
        block->fNext = nullptr;
        SkASSERT(nullptr == fTail->fNext);
        fTail->fNext = block;
        fTail = block;
        fSize += block->fSize;
        SkDEBUGCODE(++fAllocBlockCnt);
    }
    SkASSERT(kAssignedMarker == fTail->fBlockSentinal);
    SkASSERT(fTail->fFreeSize >= size);
    intptr_t ptr = fTail->fCurrPtr;
    // We stash a pointer to the block header, just before the allocated space,
    // so that we can decrement the live count on delete in constant time.
    AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
    SkDEBUGCODE(allocData->fSentinal = kAssignedMarker);
    SkDEBUGCODE(allocData->fID = []{
        static std::atomic<int32_t> nextID{1};
        return nextID++;
    }());
    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    SkDEBUGCODE(fAllocatedIDs.add(allocData->fID));
    allocData->fHeader = fTail;
    ptr += kPerAllocPad;
    fTail->fPrevPtr = fTail->fCurrPtr;
    fTail->fCurrPtr += size;
    fTail->fFreeSize -= size;
    fTail->fLiveCount += 1;
    SkDEBUGCODE(++fAllocationCnt);
    VALIDATE;
    return reinterpret_cast<void*>(ptr);
}

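// release() decrements the owning block's live count. When the count hits zero the block is
// unlinked and freed (the head block is merely reset). Otherwise, if this was the most recent
// allocation in its block, the space is reclaimed by rewinding fCurrPtr.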
void GrMemoryPool::release(void* p) {
    VALIDATE;
    intptr_t ptr = reinterpret_cast<intptr_t>(p) - kPerAllocPad;
    AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
    SkASSERT(kAssignedMarker == allocData->fSentinal);
    SkDEBUGCODE(allocData->fSentinal = kFreedMarker);
    SkDEBUGCODE(fAllocatedIDs.remove(allocData->fID));
    BlockHeader* block = allocData->fHeader;
    SkASSERT(kAssignedMarker == block->fBlockSentinal);
    if (1 == block->fLiveCount) {
        // The head block is special: it is reset rather than deleted.
        if (fHead == block) {
            fHead->fCurrPtr = reinterpret_cast<intptr_t>(fHead) + kHeaderSize;
            fHead->fLiveCount = 0;
            fHead->fFreeSize = fHead->fSize - kHeaderSize;
        } else {
            BlockHeader* prev = block->fPrev;
            BlockHeader* next = block->fNext;
            SkASSERT(prev);
            prev->fNext = next;
            if (next) {
                next->fPrev = prev;
            } else {
                SkASSERT(fTail == block);
                fTail = prev;
            }
            fSize -= block->fSize;
            DeleteBlock(block);
            SkDEBUGCODE(fAllocBlockCnt--);
        }
    } else {
        --block->fLiveCount;
        // Trivial reclaim: if we're releasing the most recent allocation, reuse its space.
        if (block->fPrevPtr == ptr) {
            block->fFreeSize += (block->fCurrPtr - block->fPrevPtr);
            block->fCurrPtr = block->fPrevPtr;
        }
    }
    SkDEBUGCODE(--fAllocationCnt);
    VALIDATE;
}

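// CreateBlock/DeleteBlock manage the raw block storage. A block is a single sk_malloc'ed
// region: a BlockHeader followed by bump-allocated space starting at fCurrPtr.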
GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t blockSize) {
    blockSize = SkTMax<size_t>(blockSize, kHeaderSize);
    BlockHeader* block =
        reinterpret_cast<BlockHeader*>(sk_malloc_throw(blockSize));
    // we assume malloc gives us aligned memory
    SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
    SkDEBUGCODE(block->fBlockSentinal = kAssignedMarker);
    block->fLiveCount = 0;
    block->fFreeSize = blockSize - kHeaderSize;
    block->fCurrPtr = reinterpret_cast<intptr_t>(block) + kHeaderSize;
    block->fPrevPtr = 0; // gcc warns on assigning nullptr to an intptr_t.
    block->fSize = blockSize;
    return block;
}

void GrMemoryPool::DeleteBlock(BlockHeader* block) {
    SkASSERT(kAssignedMarker == block->fBlockSentinal);
    SkDEBUGCODE(block->fBlockSentinal = kFreedMarker); // FWIW
    sk_free(block);
}

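// Debug-only consistency check: walks the block list and verifies the doubly-linked
// structure, alignment, per-block free-space accounting, and that the live allocation count
// matches both fAllocationCnt and the set of outstanding IDs.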
void GrMemoryPool::validate() {
#ifdef SK_DEBUG
    BlockHeader* block = fHead;
    BlockHeader* prev = nullptr;
    SkASSERT(block);
    int allocCount = 0;
    do {
        SkASSERT(kAssignedMarker == block->fBlockSentinal);
        allocCount += block->fLiveCount;
        SkASSERT(prev == block->fPrev);
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        intptr_t b = reinterpret_cast<intptr_t>(block);
        size_t ptrOffset = block->fCurrPtr - b;
        size_t totalSize = ptrOffset + block->fFreeSize;
        intptr_t userStart = b + kHeaderSize;

        SkASSERT(!(b % kAlignment));
        SkASSERT(!(totalSize % kAlignment));
        SkASSERT(!(block->fCurrPtr % kAlignment));
        if (fHead != block) {
            SkASSERT(block->fLiveCount);
            SkASSERT(totalSize >= fMinAllocSize);
        } else {
            SkASSERT(totalSize == block->fSize);
        }
        if (!block->fLiveCount) {
            SkASSERT(ptrOffset == kHeaderSize);
            SkASSERT(userStart == block->fCurrPtr);
        } else {
            AllocHeader* allocData = reinterpret_cast<AllocHeader*>(userStart);
            SkASSERT(allocData->fSentinal == kAssignedMarker ||
                     allocData->fSentinal == kFreedMarker);
            SkASSERT(block == allocData->fHeader);
        }

        prev = block;
    } while ((block = block->fNext));
    SkASSERT(allocCount == fAllocationCnt);
    SkASSERT(fAllocationCnt == fAllocatedIDs.count());
    SkASSERT(prev == fTail);
    SkASSERT(fAllocBlockCnt != 0 || fSize == 0);
#endif
}