/*
    Copyright 2010 Google Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
 */

#include "GrBufferAllocPool.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
#include "GrIndexBuffer.h"
#include "GrGpu.h"
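
// GrBufferAllocPool suballocates transient vertex/index data out of larger
// GPU buffers: it grows a list of buffer blocks on demand and recycles a
// small set of preallocated buffers across reset() calls.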

#if GR_DEBUG
    #define VALIDATE validate
#else
    #define VALIDATE()
#endif

#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

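// Usage sketch (illustrative, not part of the original file): a hypothetical
// caller holding a GrGpu* might reserve space for vertex data roughly like
// this; 'layout' and 'vertexCount' are assumed to come from the caller.
//
//     GrVertexBufferAllocPool pool(gpu, false /*frequentResetHint*/,
//                                  0 /*use the default block size*/, 4);
//     const GrVertexBuffer* vb = NULL;
//     int startVertex = 0;
//     void* verts = pool.makeSpace(layout, vertexCount, &vb, &startVertex);
//     if (NULL != verts) {
//         // ... write vertexCount vertices into verts ...
//         pool.unlock();  // flush pending writes before drawing from vb
//         // ... issue the draw using vb and startVertex ...
//     }
//     pool.reset();       // recycle blocks once the GPU is done with them
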
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     bool frequentResetHint,
                                     size_t blockSize,
                                     int preallocBufferCnt) :
        fBlocks(GrMax(8, 2*preallocBufferCnt)) {

    GrAssert(NULL != gpu);
    fGpu = gpu;
    fGpu->ref();
    fGpuIsReffed = true;

    fBufferType = bufferType;
    fFrequentResetHint = frequentResetHint;
    fBufferPtr = NULL;
    fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fPreallocBuffersInUse = 0;
    fFirstPreallocBuffer = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (NULL != buffer) {
            *fPreallocBuffers.append() = buffer;
            buffer->ref();
        }
    }
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    releaseGpuRef();
}

void GrBufferAllocPool::releaseGpuRef() {
    if (fGpuIsReffed) {
        fGpu->unref();
        fGpuIsReffed = false;
    }
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // must set this after the above loop, since destroyBlock() reads
        // fFirstPreallocBuffer.
        fFirstPreallocBuffer = (fFirstPreallocBuffer + fPreallocBuffersInUse) %
                               fPreallocBuffers.count();
    }
    fCpuData.realloc(fGpu->supportsBufferLocking() ? 0 : fMinBlockSize);
    GrAssert(0 == fPreallocBuffersInUse);
    VALIDATE();
}

void GrBufferAllocPool::unlock() {
    VALIDATE();

    if (NULL != fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isLocked()) {
            block.fBuffer->unlock();
        } else {
            size_t flushSize = block.fBuffer->size() - block.fBytesFree;
            flushCpuData(block.fBuffer, flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}

#if GR_DEBUG
void GrBufferAllocPool::validate() const {
    if (NULL != fBufferPtr) {
        GrAssert(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            GrAssert(buf->lockPtr() == fBufferPtr);
        } else {
            GrAssert(fCpuData.get() == fBufferPtr);
            GrAssert(fCpuData.size() == fBlocks.back().fBuffer->size());
        }
    } else {
        GrAssert(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        GrAssert(!fBlocks[i].fBuffer->isLocked());
    }
}
#endif

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    GrAssert(NULL != buffer);
    GrAssert(NULL != offset);

    if (NULL != fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((size + pad) <= back.fBytesFree) {
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    if (!createBlock(size)) {
        return NULL;
    }
    VALIDATE();
    GrAssert(NULL != fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    return fBufferPtr;
}
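
// Worked example of the padding math in makeSpace() (illustrative numbers):
// if the back block has already handed out usedBytes == 10 and the caller
// requests alignment == 4, GrSizeAlignUpPad(10, 4) yields pad == 2, so the
// new allocation begins at offset 12 and fBytesFree shrinks by size + 2.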

int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
    VALIDATE();
    if (NULL != fBufferPtr) {
        const BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
        if (back.fBytesFree < pad) {
            // guard against unsigned underflow when the free tail is smaller
            // than the padding needed to align the next item.
            return 0;
        }
        return (back.fBytesFree - pad) / itemSize;
    } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
        return fMinBlockSize / itemSize;
    }
    return 0;
}

int GrBufferAllocPool::preallocatedBuffersRemaining() const {
    return fPreallocBuffers.count() - fPreallocBuffersInUse;
}

int GrBufferAllocPool::preallocatedBufferCount() const {
    return fPreallocBuffers.count();
}

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();
    if (NULL != fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t bytesUsed = back.fBuffer->size() - back.fBytesFree;
        if (bytes >= bytesUsed) {
            destroyBlock();
            bytes -= bytesUsed;
        } else {
            back.fBytesFree += bytes;
            return;
        }
    }
    VALIDATE();
    GrAssert(NULL == fBufferPtr);
    // We don't partially roll back buffers because our VB semantics say that
    // locking a VB discards its previous content. We could honor partial
    // roll-back by using updateSubData rather than lock. We will roll back
    // fully released buffers, though.
    while (!fBlocks.empty() &&
           bytes >= fBlocks.back().fBuffer->size()) {
        bytes -= fBlocks.back().fBuffer->size();
        destroyBlock();
    }
    VALIDATE();
}
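
// Example of the roll-back rule in putBack() (illustrative numbers): with
// 60 bytes used in the back block, putBack(100) destroys that whole block
// (recovering all 60 bytes at once) and then destroys older blocks only if
// the remaining 40 bytes cover an entire buffer's size.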

bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = GrMax(requestSize, fMinBlockSize);
    GrAssert(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        uint32_t nextBuffer = (fPreallocBuffersInUse + fFirstPreallocBuffer) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (NULL != fBufferPtr) {
        GrAssert(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isLocked()) {
            prev.fBuffer->unlock();
        } else {
            flushCpuData(prev.fBuffer,
                         prev.fBuffer->size() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    GrAssert(NULL == fBufferPtr);

    if (fGpu->supportsBufferLocking() &&
        size > GR_GEOM_BUFFER_LOCK_THRESHOLD &&
        (!fFrequentResetHint || requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD)) {
        fBufferPtr = block.fBuffer->lock();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.realloc(size);
    }

    VALIDATE();

    return true;
}
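
// Note on the lock-vs-staging choice in createBlock(): small blocks, and
// frequently-reset pools servicing small requests, are staged in fCpuData
// and uploaded later by flushCpuData(); only requests above
// GR_GEOM_BUFFER_LOCK_THRESHOLD write through a locked buffer directly.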

void GrBufferAllocPool::destroyBlock() {
    GrAssert(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fFirstPreallocBuffer +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    GrAssert(!block.fBuffer->isLocked());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}

void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
                                     size_t flushSize) {
    GrAssert(NULL != buffer);
    GrAssert(!buffer->isLocked());
    GrAssert(fCpuData.get() == fBufferPtr);
    GrAssert(fCpuData.size() == buffer->size());
    GrAssert(flushSize <= buffer->size());

    bool updated = false;
    if (fGpu->supportsBufferLocking() &&
        flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
        void* data = buffer->lock();
        if (NULL != data) {
            memcpy(data, fBufferPtr, flushSize);
            buffer->unlock();
            updated = true;
        }
    }
    if (!updated) {
        // lock was unsupported or failed; fall back to an explicit upload.
        buffer->updateData(fBufferPtr, flushSize);
    }
}

GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
    if (kIndex_BufferType == fBufferType) {
        return fGpu->createIndexBuffer(size, true);
    } else {
        GrAssert(kVertex_BufferType == fBufferType);
        return fGpu->createVertexBuffer(size, true);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 bool frequentResetHint,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kVertex_BufferType,
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
}

void* GrVertexBufferAllocPool::makeSpace(GrVertexLayout layout,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    GrAssert(vertexCount >= 0);
    GrAssert(NULL != buffer);
    GrAssert(NULL != startVertex);

    size_t vSize = GrDrawTarget::VertexSize(layout);
    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vSize * vertexCount,
                                     vSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    GrAssert(0 == offset % vSize);
    *startVertex = offset / vSize;
    return ptr;
}

bool GrVertexBufferAllocPool::appendVertices(GrVertexLayout layout,
                                             int vertexCount,
                                             const void* vertices,
                                             const GrVertexBuffer** buffer,
                                             int* startVertex) {
    void* space = makeSpace(layout, vertexCount, buffer, startVertex);
    if (NULL != space) {
        memcpy(space,
               vertices,
               GrDrawTarget::VertexSize(layout) * vertexCount);
        return true;
    } else {
        return false;
    }
}
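
// Example (hypothetical caller): copying three prebuilt vertices into the
// pool in one call; 'layout' and 'myThreeVerts' are stand-ins, and any
// layout/data pair consistent with GrDrawTarget::VertexSize() behaves the
// same way.
//
//     const GrVertexBuffer* vb = NULL;
//     int start = 0;
//     if (pool.appendVertices(layout, 3, myThreeVerts, &vb, &start)) {
//         // draw 3 vertices from vb beginning at 'start'
//     }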

int GrVertexBufferAllocPool::preallocatedBufferVertices(GrVertexLayout layout) const {
    return INHERITED::preallocatedBufferSize() /
           GrDrawTarget::VertexSize(layout);
}

int GrVertexBufferAllocPool::currentBufferVertices(GrVertexLayout layout) const {
    return currentBufferItems(GrDrawTarget::VertexSize(layout));
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               bool frequentResetHint,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kIndex_BufferType,
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    GrAssert(indexCount >= 0);
    GrAssert(NULL != buffer);
    GrAssert(NULL != startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    GrAssert(0 == offset % sizeof(uint16_t));
    *startIndex = offset / sizeof(uint16_t);
    return ptr;
}

bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                           const void* indices,
                                           const GrIndexBuffer** buffer,
                                           int* startIndex) {
    void* space = makeSpace(indexCount, buffer, startIndex);
    if (NULL != space) {
        memcpy(space, indices, sizeof(uint16_t) * indexCount);
        return true;
    } else {
        return false;
    }
}
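
// Example (hypothetical caller): appending the common two-triangle quad
// pattern as six 16-bit indices.
//
//     static const uint16_t kQuadIndices[] = { 0, 1, 2, 0, 2, 3 };
//     const GrIndexBuffer* ib = NULL;
//     int start = 0;
//     if (pool.appendIndices(6, kQuadIndices, &ib, &start)) {
//         // draw 6 indices from ib beginning at 'start'
//     }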

int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
    return INHERITED::preallocatedBufferSize() / sizeof(uint16_t);
}

int GrIndexBufferAllocPool::currentBufferIndices() const {
    return currentBufferItems(sizeof(uint16_t));
}